use std::cmp::Ordering;
use std::ops::Range;
/// `Position` represents a two-dimensional position consisting of a line and a column.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct Position {
/// A line number, in the range [0, _the number of lines_).
pub line: usize,
/// A column number, in the range [0, _the width of `line`_).
pub column: usize,
}
impl Position {
/// Creates a new `Position`.
///
/// # Examples
///
/// ```
/// # use edit::editor::Position;
/// let p = Position::new(1, 2);
/// ```
pub fn new(line: usize, column: usize) -> Position {
Position { line, column }
}
}
impl PartialOrd for Position {
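/// Positions are ordered by line first, then by column; e.g.
/// `Position::new(0, 9) < Position::new(1, 0)`.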
fn partial_cmp(&self, other: &Position) -> Option<Ordering> {
let lc = self.line.partial_cmp(&other.line);
if lc != Some(Ordering::Equal) {
return lc;
}
self.column.partial_cmp(&other.column)
}
}
#[derive(PartialEq, Debug)]
pub struct Core {
buffer: Vec<char>,
newline_indices: Vec<usize>,
line: usize,
column: usize,
}
impl Core {
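/// Creates a new `Core` for `buffer`, positioned at (`line`, `column`).
/// Returns an `Err` when the position is out of range.
///
/// A minimal usage sketch (assuming `Core` is exported from the same
/// module as `Position`):
///
/// ```
/// # use edit::editor::Core;
/// let core = Core::new("Hello, world!\nThe 2nd line.", 1, 3).unwrap();
/// assert_eq!(core.line(), 1);
/// assert_eq!(core.column(), 3);
/// ```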
pub fn new(buffer: &str, line: usize, column: usize) -> Result<Core, String> {
let chars: Vec<char> = buffer.chars().collect();
let mut indices: Vec<usize> = chars
.iter()
.enumerate()
.filter_map(|(i, ch)| if *ch == '\n' { Some(i) } else { None })
.collect();
let char_count = chars.len();
if indices.last().map(|n| n + 1) != Some(char_count) {
indices.push(char_count);
}
if indices.len() <= line {
return Err(format!(
"Line {} is out of range [0, {})",
line,
indices.len()
));
}
let width = indices[line] - if line == 0 { 0 } else { indices[line - 1] + 1 };
if width < column {
return Err(format!("Column {} is out of range [0, {}]", column, width));
}
Ok(Core {
buffer: chars,
newline_indices: indices,
line,
column,
})
}
pub fn reset(&mut self, buffer: &str, line: usize, column: usize) {
let chars: Vec<char> = buffer.chars().collect();
let mut indices: Vec<usize> = chars
.iter()
.enumerate()
.filter_map(|(i, ch)| if *ch == '\n' { Some(i) } else { None })
.collect();
let char_count = chars.len();
// Match the trailing-newline handling in `new` so that `reset` builds
// the same indices for the same buffer.
if indices.last().map(|n| n + 1) != Some(char_count) {
indices.push(char_count);
}
assert!(indices.len() > line);
let width = indices[line] - if line == 0 { 0 } else { indices[line - 1] + 1 };
assert!(width >= column);
self.buffer = chars;
self.newline_indices = indices;
self.line = line;
self.column = column;
}
pub fn buffer(&self) -> &[char] {
&self.buffer
}
pub fn line(&self) -> usize {
self.line
}
pub fn column(&self) -> usize {
self.column
}
pub fn line_count(&self) -> usize {
self.newline_indices.len()
}
pub fn line_width(&self, n: usize) -> Option<usize> {
if n >= self.line_count() {
return None;
}
let right = self.newline_indices[n];
if n == 0 {
return Some(right);
}
let left = self.newline_indices[n - 1] + 1;
Some(right - left)
}
pub fn current_line_width(&self) -> usize {
self.line_width(self.line).expect(&format!(
"current_line_width: unexpected error (line: {})",
self.line
))
}
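/// Maps a (`line`, `column`) pair to an offset into the character buffer,
/// counting newline characters; returns `None` when the position is out of
/// range. A sketch mirroring the `test_offset` cases below:
///
/// ```
/// # use edit::editor::Core;
/// let core = Core::new("Hello, world!\nThe 2nd line.", 0, 0).unwrap();
/// assert_eq!(core.offset(1, 1), Some(15)); // 13 chars, '\n', then 1
/// assert_eq!(core.offset(2, 2), None);
/// ```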
pub fn offset(&self, line: usize, column: usize) -> Option<usize> {
if line >= self.line_count() || self.line_width(line).unwrap() < column {
return None;
}
if line == 0 {
return Some(column);
}
let line_offset = self.newline_indices[line - 1] + 1;
Some(line_offset + column)
}
pub fn current_offset(&self) -> usize {
self.offset(self.line, self.column).expect(&format!(
"current_offset: unexpected error (line: {}, column: {})",
self.line,
self.column,
))
}
pub fn set_column(&mut self, n: usize) {
if n <= self.current_line_width() {
self.column = n;
}
}
pub fn set_line(&mut self, n: usize) {
if n < self.line_count() {
self.line = n;
}
let width = self.current_line_width();
if width < self.column {
self.column = width;
}
}
pub fn move_left(&mut self, n: usize) {
if self.column < n {
self.column = 0;
return;
}
self.column -= n;
}
pub fn move_right(&mut self, n: usize) {
let width = self.current_line_width();
if self.column + n >= width {
self.column = width;
return;
}
self.column += n;
}
pub fn move_up(&mut self, n: usize) {
if self.line < n {
self.line = 0;
} else {
self.line -= n;
}
let width = self.current_line_width();
if width < self.column {
self.column = width;
}
}
pub fn move_down(&mut self, n: usize) {
if self.line + n >= self.line_count() {
self.line = self.line_count() - 1;
} else {
self.line += n;
}
let width = self.current_line_width();
if width < self.column {
self.column = width;
}
}
pub fn insert_at(&mut self, ch: char, line: usize, column: usize) {
let offset = self.offset(line, column);
if offset.is_none() {
return;
}
let i = offset.unwrap();
let current_offset = self.current_offset();
self.buffer.insert(i, ch);
for x in self.newline_indices[line..].iter_mut() {
*x += 1
}
if ch == '\n' {
self.newline_indices.insert(line, i);
}
if ch == '\n' && i <= current_offset {
if self.line == line {
self.column = current_offset - i;
}
self.line += 1;
return;
}
if line == self.line && column <= self.column {
self.column += 1;
}
}
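/// Inserts `s` starting at (`line`, `column`). Iterating the characters in
/// reverse while inserting at a fixed position leaves the string in its
/// original order, since each earlier character pushes the later ones to
/// the right.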
pub fn insert_string_at(&mut self, s: &str, line: usize, column: usize) {
for ch in s.chars().rev() {
self.insert_at(ch, line, column)
}
}
pub fn delete_at(&mut self, line: usize, column: usize) {
let line_width = self.line_width(line);
if line_width.is_none() {
return;
}
let line_width = line_width.unwrap();
if self.line_count() <= line || line_width < column {
return;
}
let current_offset = self.current_offset();
let offset = self.offset(line, column).expect(&format!(
"offset: {} {}",
line,
column
));
let ch = self.buffer.remove(offset);
if ch == '\n' {
self.newline_indices.remove(line);
}
for x in self.newline_indices[line..].iter_mut() {
*x -= 1
}
if ch == '\n' && offset < current_offset {
self.line -= 1;
if self.line == line {
self.column = line_width + current_offset - offset - 1;
}
return;
}
if line != self.line {
return;
}
if column < self.column {
self.column -= 1;
}
}
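/// Deletes every character in `range`. Each `delete_at` call shifts the
/// remaining text left, so repeatedly deleting at the fixed start position
/// consumes the whole range.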
pub fn delete_range(&mut self, range: Range<Position>) {
let start = self.offset(range.start.line, range.start.column).expect(
&format!(
"out of range: {:?}",
range
.start
),
);
let n = self.offset(range.end.line, range.end.column).expect(
&format!(
"out of range: {:?}",
range.end
),
) - start;
for _ in 0..n {
self.delete_at(range.start.line, range.start.column)
}
}
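/// Returns the position of the next run of characters satisfying `f`,
/// first skipping past the run the cursor is currently in (roughly a
/// vim-like `w` motion when `f` is `char::is_alphanumeric`).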
pub fn next_position(&self, f: fn(char) -> bool) -> Option<Position> {
let off = self.current_offset();
let indices = &self.newline_indices[self.line..];
let mut it = self.buffer[off..].iter();
let p = it.position(|&ch| !f(ch))?;
it.position(|&ch| f(ch)).map(|n| n + off + p).map(|n| {
let i = indices.iter().position(|&x| n < x).expect(
"next_position: unexpected error",
) + self.line;
if i == self.line {
return Position::new(i, self.column + n - off + 1);
}
Position::new(i, n - self.newline_indices[i - 1])
})
}
pub fn previous_position(&self, f: fn(char) -> bool) -> Option<Position> {
let off = self.current_offset();
let indices = &self.newline_indices[..self.line];
let mut it = self.buffer[..off].iter();
if it.rposition(|&ch| f(ch)).is_none() {
return None;
}
it.rposition(|&ch| !f(ch))
.map(|n| n + 1)
.map(|n| {
let i = indices.iter().rposition(|&x| n > x);
if i.is_none() {
return Position::new(0, n);
}
let i = i.unwrap();
Position::new(i + 1, n - self.newline_indices[i] - 1)
})
.or(Some(Position::new(0, 0)))
}
pub fn next_keyword_position(&self) -> Option<Position> {
self.next_position(char::is_alphanumeric)
}
pub fn previous_keyword_position(&self) -> Option<Position> {
self.previous_position(char::is_alphanumeric)
}
pub fn next_symbol_position(&self) -> Option<Position> {
self.next_position(char::is_symbol)
}
pub fn previous_symbol_position(&self) -> Option<Position> {
self.previous_position(char::is_symbol)
}
pub fn next_end_position(&self, f: fn(char) -> bool) -> Option<Position> {
let off = self.current_offset();
let indices = &self.newline_indices[self.line..];
let mut it = self.buffer[off..].iter();
let p = it.position(|&ch| f(ch))?;
it.position(|&ch| !f(ch)).map(|n| n + off + p - 1).map(|n| {
let i = indices.iter().position(|&x| n < x).expect(
"next_end_position: unexpected error",
) + self.line;
if i == self.line {
return Position::new(i, self.column + n - off + 1);
}
Position::new(i, n - self.newline_indices[i - 1])
})
}
pub fn previous_end_position(&self, f: fn(char) -> bool) -> Option<Position> {
let off = self.current_offset();
let indices = &self.newline_indices[..self.line];
let mut it = self.buffer[..off].iter();
if it.rposition(|&ch| !f(ch)).is_none() {
return None;
}
it.rposition(|&ch| f(ch))
.map(|n| {
let i = indices.iter().rposition(|&x| n > x);
if i.is_none() {
return Position::new(0, n);
}
let i = i.unwrap();
Position::new(i, n - self.newline_indices[i])
})
.or(Some(Position::new(0, 0)))
}
pub fn next_keyword_end_position(&self) -> Option<Position> {
self.next_end_position(char::is_alphanumeric)
}
pub fn previous_keyword_end_position(&self) -> Option<Position> {
self.previous_end_position(char::is_alphanumeric)
}
pub fn next_symbol_end_position(&self) -> Option<Position> {
self.next_end_position(char::is_symbol)
}
pub fn previous_symbol_end_position(&self) -> Option<Position> {
self.previous_end_position(char::is_symbol)
}
pub fn after_position(&self, f: fn(char) -> bool) -> Option<Position> {
self.next_end_position(f).map(|p| if p.column <
self.line_width(p.line)
.unwrap()
{
Position::new(p.line, p.column + 1)
} else {
Position::new(p.line + 1, 0)
})
}
pub fn before_position(&self, f: fn(char) -> bool) -> Option<Position> {
self.previous_position(f).and_then(|p| if p.column == 0 {
if p.line == 0 {
None
} else {
Some(Position::new(p.line - 1, 0))
}
} else {
Some(Position::new(p.line, p.column - 1))
})
}
pub fn after_keyword_position(&self) -> Option<Position> {
self.after_position(char::is_alphanumeric)
}
pub fn before_keyword_position(&self) -> Option<Position> {
self.before_position(char::is_alphanumeric)
}
pub fn after_symbol_position(&self) -> Option<Position> {
self.after_position(char::is_symbol)
}
pub fn before_symbol_position(&self) -> Option<Position> {
self.before_position(char::is_symbol)
}
}
impl Clone for Core {
fn clone(&self) -> Core {
Core {
buffer: self.buffer.clone(),
line: self.line,
column: self.column,
newline_indices: self.newline_indices.clone(),
}
}
}
trait Character {
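/// Returns `true` for characters that are neither alphanumeric nor
/// whitespace, e.g. `'*'` or `','`.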
fn is_symbol(self) -> bool;
}
impl Character for char {
fn is_symbol(self) -> bool {
!(self.is_alphanumeric() || self.is_whitespace())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_new() {
let buffer = "Hello, world!\nThe 2nd line.";
let editor = Core::new(buffer, 10, 10);
assert!(editor.is_err());
}
#[test]
fn test_line_count() {
let buffer = "Hello, world!\nThe 2nd line.";
let editor = Core::new(buffer, 0, 0).unwrap();
assert_eq!(editor.line_count(), 2);
let editor = Core::new("", 0, 0).unwrap();
assert_eq!(editor.line_count(), 1);
let editor = Core::new("aaa bbb", 0, 0).unwrap();
assert_eq!(editor.line_count(), 1);
}
#[test]
fn test_line_width() {
let buffer = "Hello, world!\nThe 2nd line.";
let editor = Core::new(buffer, 0, 0).unwrap();
assert_eq!(editor.line_width(0), Some(13));
assert_eq!(editor.line_width(1), Some(13));
assert_eq!(editor.line_width(2), None);
let editor = Core::new("", 0, 0).unwrap();
assert_eq!(editor.line_width(0), Some(0));
assert_eq!(editor.line_width(1), None);
let editor = Core::new("aaa bbb", 0, 0).unwrap();
assert_eq!(editor.line_width(0), Some(7));
assert_eq!(editor.line_width(1), None);
let buffer = "世界";
let editor = Core::new(buffer, 0, 0).unwrap();
assert_eq!(editor.line_width(0), Some(2));
assert_eq!(editor.line_width(1), None);
}
#[test]
fn test_offset() {
let buffer = "Hello, world!\nThe 2nd line.";
let editor = Core::new(buffer, 0, 0).unwrap();
assert_eq!(editor.offset(0, 0), Some(0));
assert_eq!(editor.offset(1, 1), Some(15));
assert_eq!(editor.offset(2, 2), None);
assert_eq!(editor.offset(1, 13), Some(27));
assert_eq!(editor.offset(1, 14), None);
assert_eq!(editor.offset(0, 13), Some(13));
assert_eq!(editor.offset(0, 14), None);
let editor = Core::new("", 0, 0).unwrap();
assert_eq!(editor.offset(0, 0), Some(0));
assert_eq!(editor.offset(0, 1), None);
assert_eq!(editor.offset(1, 0), None);
assert_eq!(editor.offset(1, 1), None);
assert_eq!(editor.offset(10, 10), None);
let editor = Core::new("aaa bbb", 0, 0).unwrap();
assert_eq!(editor.offset(0, 0), Some(0));
assert_eq!(editor.offset(0, 1), Some(1));
assert_eq!(editor.offset(1, 0), None);
assert_eq!(editor.offset(1, 1), None);
assert_eq!(editor.offset(10, 10), None);
assert_eq!(editor.offset(0, 7), Some(7));
assert_eq!(editor.offset(0, 8), None);
let buffer = "世界\nabc";
let editor = Core::new(buffer, 0, 0).unwrap();
assert_eq!(editor.offset(0, 0), Some(0));
assert_eq!(editor.offset(0, 1), Some(1));
assert_eq!(editor.offset(0, 2), Some(2));
assert_eq!(editor.offset(1, 0), Some(3));
assert_eq!(editor.offset(1, 3), Some(6));
assert_eq!(editor.offset(1, 4), None);
}
#[test]
fn test_move_right() {
let buffer = "Hello, world!\nThe 2nd line.";
let mut editor = Core::new(buffer, 1, 6).unwrap();
let expected = [7, 8, 9, 10, 11, 12, 13, 13];
for i in 0..expected.len() {
editor.move_right(1);
assert_eq!(editor, Core::new(buffer, 1, expected[i]).unwrap());
}
for i in 0..editor.line_width(editor.line()).unwrap() {
let mut editor = Core::new(buffer, 1, i).unwrap();
let width = editor.line_width(editor.line()).unwrap();
editor.move_right(width + 1);
assert_eq!(editor, Core::new(buffer, 1, width).unwrap());
}
let buffer = "世界\nabc";
let mut editor = Core::new(buffer, 0, 0).unwrap();
let expected = [1, 2, 2];
for i in 0..expected.len() {
editor.move_right(1);
assert_eq!(editor, Core::new(buffer, 0, expected[i]).unwrap());
}
}
#[test]
fn test_move_left() {
let buffer = "Hello, world!\nThe 2nd line.";
let mut editor = Core::new(buffer, 1, 6).unwrap();
let expected = [5, 4, 3, 2, 1, 0, 0];
for i in 0..expected.len() {
editor.move_left(1);
assert_eq!(editor, Core::new(buffer, 1, expected[i]).unwrap());
}
for i in 0..editor.line_width(editor.line()).unwrap() {
let mut editor = Core::new(buffer, 1, i).unwrap();
let width = editor.line_width(editor.line()).unwrap();
editor.move_left(width + 1);
assert_eq!(editor, Core::new(buffer, 1, 0).unwrap());
}
let buffer = "abc\nHello, 世界\ndef";
let mut editor = Core::new(buffer, 1, 9).unwrap();
let expected = [7, 5, 3, 1, 0, 0];
for i in 0..expected.len() {
editor.move_left(2);
assert_eq!(editor, Core::new(buffer, 1, expected[i]).unwrap());
}
}
#[test]
fn test_move_up() {
let buffer = "Hello, world!\nThe 2nd line.\nAAABBBCCC.";
let mut editor = Core::new(buffer, 2, 4).unwrap();
let expected = [1, 0, 0];
for i in 0..expected.len() {
editor.move_up(1);
assert_eq!(editor, Core::new(buffer, expected[i], 4).unwrap());
}
for i in 0..editor.line_count() {
let mut editor = Core::new(buffer, i, 1).unwrap();
let count = editor.line_count();
editor.move_up(count);
assert_eq!(editor, Core::new(buffer, 0, 1).unwrap());
}
let buffer = "aaa\nbbbb";
let mut editor = Core::new(buffer, 1, 4).unwrap();
editor.move_up(1);
assert_eq!(editor, Core::new(buffer, 0, 3).unwrap());
}
#[test]
fn test_move_down() {
let buffer = "Hello, world!\nThe 2nd line.\nAAABBBCCC.";
let mut editor = Core::new(buffer, 0, 4).unwrap();
let expected = [1, 2, 2];
for i in 0..expected.len() {
editor.move_down(1);
assert_eq!(editor, Core::new(buffer, expected[i], 4).unwrap());
}
for i in 0..editor.line_count() {
let mut editor = Core::new(buffer, i, 1).unwrap();
let count = editor.line_count();
editor.move_down(count);
assert_eq!(
editor,
Core::new(buffer, buffer.match_indices('\n').count(), 1).unwrap()
);
}
let buffer = "aaaa\nbbb";
let mut editor = Core::new(buffer, 0, 4).unwrap();
editor.move_down(1);
assert_eq!(editor, Core::new(buffer, 1, 3).unwrap());
}
#[test]
fn test_insert_at() {
let buffer = "Hello, world!\nThe 2nd line.\nAAABBBCCC.";
let mut editor = Core::new(buffer, 0, 6).unwrap();
editor.insert_at('\n', 0, 6);
assert_eq!(
editor,
Core::new("Hello,\n world!\nThe 2nd line.\nAAABBBCCC.", 1, 0).unwrap()
);
editor.insert_at('D', 3, 9);
assert_eq!(
editor,
Core::new("Hello,\n world!\nThe 2nd line.\nAAABBBCCCD.", 1, 0).unwrap()
);
editor.insert_at('a', 1, 0);
assert_eq!(
editor,
Core::new("Hello,\na world!\nThe 2nd line.\nAAABBBCCCD.", 1, 1).unwrap()
);
let buffer = "aaa";
let mut editor = Core::new(buffer, 0, 0).unwrap();
editor.insert_at('a', 10, 10);
assert_eq!(editor, Core::new(buffer, 0, 0).unwrap());
let buffer = "💖a";
let mut editor = Core::new(buffer, 0, 0).unwrap();
editor.insert_at('💖', 0, 2);
let want = "💖a💖";
assert_eq!(editor, Core::new(want, 0, 0).unwrap());
}
#[test]
fn test_insert_string_at() {
let buffer = "aaa ccc ddd";
let mut editor = Core::new(buffer, 0, 7).unwrap();
editor.insert_string_at("bbb ", 0, 4);
assert_eq!(editor, Core::new("aaa bbb ccc ddd", 0, 11).unwrap());
}
#[test]
fn test_delete_at() {
let buffer = "Hello, world!\nThe 2nd line.\nAAABBBCCC.";
let mut editor = Core::new(buffer, 0, 6).unwrap();
editor.delete_at(0, 6);
assert_eq!(
editor,
Core::new("Hello,world!\nThe 2nd line.\nAAABBBCCC.", 0, 6).unwrap()
);
editor.delete_at(0, 12);
assert_eq!(
editor,
Core::new("Hello,world!The 2nd line.\nAAABBBCCC.", 0, 6).unwrap()
);
let mut editor = Core::new("abc\ndef", 0, 3).unwrap();
editor.delete_at(0, 2);
assert_eq!(editor, Core::new("ab\ndef", 0, 2).unwrap());
let mut editor = Core::new("abc\ndef", 1, 0).unwrap();
editor.delete_at(0, 3);
assert_eq!(editor, Core::new("abcdef", 0, 3).unwrap());
editor.delete_at(10, 10);
assert_eq!(editor, Core::new("abcdef", 0, 3).unwrap());
editor.delete_at(0, 1);
assert_eq!(editor, Core::new("acdef", 0, 2).unwrap());
let mut editor = Core::new("abc世界", 0, 3).unwrap();
editor.delete_at(0, 4);
assert_eq!(editor, Core::new("abc世", 0, 3).unwrap());
}
#[test]
fn test_delete_range() {
let buffer = "Hello, world!\nThe 2nd line.\nAAABBBCCC.";
let mut editor = Core::new(buffer, 0, 6).unwrap();
editor.delete_range(Position::new(0, 6)..Position::new(1, 5));
assert_eq!(
editor,
Core::new("Hello,nd line.\nAAABBBCCC.", 0, 6).unwrap()
);
}
#[test]
fn test_next_keyword_position() {
let buffer = "**\n\
a**";
let editor = Core::new(buffer, 0, 1).unwrap();
assert_eq!(editor.next_keyword_position(), Some(Position::new(1, 0)));
let editor = Core::new(buffer, 1, 3).unwrap();
assert_eq!(editor.next_keyword_position(), None);
let buffer = " 12wb12 ";
let editor = Core::new(buffer, 0, 0).unwrap();
assert_eq!(editor.next_keyword_position(), Some(Position::new(0, 1)));
let editor = Core::new(buffer, 0, 1).unwrap();
assert_eq!(editor.next_keyword_position(), None);
}
#[test]
fn test_previous_keyword_position() {
let buffer = "**\n\
a**";
let editor = Core::new(buffer, 0, 1).unwrap();
assert_eq!(editor.previous_keyword_position(), None);
let editor = Core::new(buffer, 1, 3).unwrap();
assert_eq!(
editor.previous_keyword_position(),
Some(Position::new(1, 0))
);
}
#[test]
fn test_next_symbol_position() {
let buffer = "ab\n\
*cd";
let editor = Core::new(buffer, 0, 1).unwrap();
assert_eq!(editor.next_symbol_position(), Some(Position::new(1, 0)));
}
#[test]
fn test_previous_symbol_position() {
let buffer = "ab\n\
*cd";
let editor = Core::new(buffer, 0, 1).unwrap();
assert_eq!(editor.previous_symbol_position(), None);
let editor = Core::new(buffer, 1, 3).unwrap();
assert_eq!(editor.previous_symbol_position(), Some(Position::new(1, 0)));
}
#[test]
fn test_after_position() {
let buffer = "aax\n\
aaa";
let editor = Core::new(buffer, 1, 1).unwrap();
assert_eq!(editor.after_position(|ch| ch == 'x'), None);
let editor = Core::new(buffer, 0, 1).unwrap();
assert_eq!(
editor.after_position(|ch| ch == 'x'),
Some(Position::new(0, 3))
);
let buffer = "aaa\n\
aaa\n";
let editor = Core::new(buffer, 1, 1).unwrap();
assert_eq!(editor.after_position(|ch| ch == '\n'), None);
}
#[test]
fn test_before_position() {
let buffer = "aax\n\
aaa";
let editor = Core::new(buffer, 0, 1).unwrap();
assert_eq!(editor.before_position(|ch| ch == 'x'), None);
let editor = Core::new(buffer, 1, 1).unwrap();
assert_eq!(
editor.before_position(|ch| ch == 'x'),
Some(Position::new(0, 1))
);
}
}
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:internal_unstable.rs
#![feature(allow_internal_unstable)]
#[macro_use]
extern crate internal_unstable;
macro_rules! foo {
($e: expr, $f: expr) => {{
$e;
$f;
internal_unstable::unstable(); //~ ERROR use of unstable
}}
}
#[allow_internal_unstable]
macro_rules! bar {
($e: expr) => {{
foo!($e,
internal_unstable::unstable());
internal_unstable::unstable();
}}
}
fn main() {
// ok, the instability is contained.
call_unstable_allow!();
construct_unstable_allow!(0);
// bad.
pass_through_allow!(internal_unstable::unstable()); //~ ERROR use of unstable
pass_through_noallow!(internal_unstable::unstable()); //~ ERROR use of unstable
println!("{:?}", internal_unstable::unstable()); //~ ERROR use of unstable
bar!(internal_unstable::unstable()); //~ ERROR use of unstable
}
// Copyright 2019 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::{iter::ExactSizeIterator, mem};
use crate::alloc::vec::Vec;
use crate::{Encode, Decode, Error};
use crate::compact::{Compact, CompactLen};
use crate::encode_like::EncodeLike;
/// Trait that allows to append items to an encoded representation without
/// decoding all previous added items.
pub trait EncodeAppend {
/// The item that will be appended.
type Item: Encode;
/// Append all items in `iter` to the given `self_encoded` representation
/// or if `self_encoded` value is empty, `iter` is encoded to the `Self` representation.
///
/// # Example
///
/// ```
///# use parity_scale_codec::EncodeAppend;
///
/// // Some encoded data
/// let data = Vec::new();
///
/// let item = 8u32;
/// let encoded = <Vec<u32> as EncodeAppend>::append_or_new(data, std::iter::once(&item)).expect("Adds new element");
///
/// // Add multiple element
/// <Vec<u32> as EncodeAppend>::append_or_new(encoded, &[700u32, 800u32, 10u32]).expect("Adds new elements");
/// ```
fn append_or_new<EncodeLikeItem, I>(
self_encoded: Vec<u8>,
iter: I,
) -> Result<Vec<u8>, Error>
where
I: IntoIterator<Item = EncodeLikeItem>,
EncodeLikeItem: EncodeLike<Self::Item>,
I::IntoIter: ExactSizeIterator;
}
impl<T: Encode> EncodeAppend for Vec<T> {
type Item = T;
fn append_or_new<EncodeLikeItem, I>(
self_encoded: Vec<u8>,
iter: I,
) -> Result<Vec<u8>, Error>
where
I: IntoIterator<Item = EncodeLikeItem>,
EncodeLikeItem: EncodeLike<Self::Item>,
I::IntoIter: ExactSizeIterator,
{
append_or_new_vec_with_any_item(self_encoded, iter)
}
}
impl<T: Encode> EncodeAppend for crate::alloc::collections::VecDeque<T> {
type Item = T;
fn append_or_new<EncodeLikeItem, I>(
self_encoded: Vec<u8>,
iter: I,
) -> Result<Vec<u8>, Error>
where
I: IntoIterator<Item = EncodeLikeItem>,
EncodeLikeItem: EncodeLike<Self::Item>,
I::IntoIter: ExactSizeIterator,
{
append_or_new_vec_with_any_item(self_encoded, iter)
}
}
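/// Reads the compact-encoded length prefix of `data` and computes the new
/// length after appending `input_len` items, together with the byte widths
/// of the old and the new prefix. The widths can differ because SCALE
/// compact encoding uses one byte for values 0..=63 but two bytes for
/// 64..=16383; e.g. appending one item to a vec of length 63 grows the
/// prefix from one byte to two.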
fn extract_length_data(data: &[u8], input_len: usize) -> Result<(u32, usize, usize), Error> {
let len = u32::from(Compact::<u32>::decode(&mut &data[..])?);
let new_len = len
.checked_add(input_len as u32)
.ok_or_else(|| "New vec length greater than `u32::MAX`.")?;
let encoded_len = Compact::<u32>::compact_len(&len);
let encoded_new_len = Compact::<u32>::compact_len(&new_len);
Ok((new_len, encoded_len, encoded_new_len))
}
// Item must have same encoding as encoded value in the encoded vec.
fn append_or_new_vec_with_any_item<Item, I>(
mut self_encoded: Vec<u8>,
iter: I,
) -> Result<Vec<u8>, Error>
where
Item: Encode,
I: IntoIterator<Item = Item>,
I::IntoIter: ExactSizeIterator,
{
let iter = iter.into_iter();
let input_len = iter.len();
// No data present, just encode the given input data.
if self_encoded.is_empty() {
crate::codec::compact_encode_len_to(&mut self_encoded, iter.len())?;
iter.for_each(|e| e.encode_to(&mut self_encoded));
return Ok(self_encoded);
}
let (new_len, encoded_len, encoded_new_len) = extract_length_data(&self_encoded, input_len)?;
let replace_len = |dest: &mut Vec<u8>| {
Compact(new_len).using_encoded(|e| {
dest[..encoded_new_len].copy_from_slice(e);
})
};
let append_new_elems = |dest: &mut Vec<u8>| iter.for_each(|a| a.encode_to(dest));
// If old and new encoded len is equal, we don't need to copy the
// already encoded data.
if encoded_len == encoded_new_len {
replace_len(&mut self_encoded);
append_new_elems(&mut self_encoded);
Ok(self_encoded)
} else {
let size = encoded_new_len + self_encoded.len() - encoded_len;
let mut res = Vec::with_capacity(size + input_len * mem::size_of::<Item>());
unsafe { res.set_len(size); }
// Insert the new encoded len, copy the already encoded data and
// add the new element.
replace_len(&mut res);
res[encoded_new_len..size].copy_from_slice(&self_encoded[encoded_len..]);
append_new_elems(&mut res);
Ok(res)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{Input, Encode, EncodeLike};
use std::collections::VecDeque;
const TEST_VALUE: u32 = {
#[cfg(not(miri))]
{ 1_000_000 }
#[cfg(miri)]
{ 1_000 }
};
#[test]
fn vec_encode_append_works() {
let encoded = (0..TEST_VALUE).fold(Vec::new(), |encoded, v| {
<Vec<u32> as EncodeAppend>::append_or_new(encoded, std::iter::once(&v)).unwrap()
});
let decoded = Vec::<u32>::decode(&mut &encoded[..]).unwrap();
assert_eq!(decoded, (0..TEST_VALUE).collect::<Vec<_>>());
}
#[test]
fn vec_encode_append_multiple_items_works() {
let encoded = (0..TEST_VALUE).fold(Vec::new(), |encoded, v| {
<Vec<u32> as EncodeAppend>::append_or_new(encoded, &[v, v, v, v]).unwrap()
});
let decoded = Vec::<u32>::decode(&mut &encoded[..]).unwrap();
let expected = (0..TEST_VALUE).fold(Vec::new(), |mut vec, i| {
vec.append(&mut vec![i, i, i, i]);
vec
});
assert_eq!(decoded, expected);
}
#[test]
fn vecdeque_encode_append_works() {
let encoded = (0..TEST_VALUE).fold(Vec::new(), |encoded, v| {
<VecDeque<u32> as EncodeAppend>::append_or_new(encoded, std::iter::once(&v)).unwrap()
});
let decoded = VecDeque::<u32>::decode(&mut &encoded[..]).unwrap();
assert_eq!(decoded, (0..TEST_VALUE).collect::<Vec<_>>());
}
#[test]
fn vecdeque_encode_append_multiple_items_works() {
let encoded = (0..TEST_VALUE).fold(Vec::new(), |encoded, v| {
<VecDeque<u32> as EncodeAppend>::append_or_new(encoded, &[v, v, v, v]).unwrap()
});
let decoded = VecDeque::<u32>::decode(&mut &encoded[..]).unwrap();
let expected = (0..TEST_VALUE).fold(Vec::new(), |mut vec, i| {
vec.append(&mut vec![i, i, i, i]);
vec
});
assert_eq!(decoded, expected);
}
#[test]
fn append_non_copyable() {
#[derive(Eq, PartialEq, Debug)]
struct NoCopy { data: u32 }
impl EncodeLike for NoCopy {}
impl Encode for NoCopy {
fn encode(&self) -> Vec<u8> {
self.data.encode()
}
}
impl Decode for NoCopy {
fn decode<I: Input>(input: &mut I) -> Result<Self, Error> {
u32::decode(input).map(|data| Self { data })
}
}
let append = NoCopy { data: 100 };
let data = Vec::new();
let encoded = <Vec<NoCopy> as EncodeAppend>::append_or_new(data, std::iter::once(&append)).unwrap();
let decoded = <Vec<NoCopy>>::decode(&mut &encoded[..]).unwrap();
assert_eq!(vec![append], decoded);
}
#[test]
fn vec_encode_like_append_works() {
let encoded = (0..TEST_VALUE).fold(Vec::new(), |encoded, v| {
<Vec<u32> as EncodeAppend>::append_or_new(encoded, std::iter::once(Box::new(v as u32))).unwrap()
});
let decoded = Vec::<u32>::decode(&mut &encoded[..]).unwrap();
assert_eq!(decoded, (0..TEST_VALUE).collect::<Vec<_>>());
}
}
use core_sled::SledKeySpace;
use super::kv_types::{
FSMMetaKey, LogMetaKey, LogMetaValue, RaftStateKey, RaftStateValue, StateMachineMetaValue,
};
use crate::types::openraft::{Entry, LogIndex};
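// Each key space below reserves a distinct one-byte PREFIX, so entries of
// different key spaces can live in the same sled tree without their keys
// colliding.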
/// Types for raft log in SledTree
pub struct Logs {}
impl SledKeySpace for Logs {
const PREFIX: u8 = 1;
const NAME: &'static str = "log";
type K = LogIndex;
type V = Entry;
}
/// Types for raft log meta data in SledTree
pub struct LogMeta {}
impl SledKeySpace for LogMeta {
const PREFIX: u8 = 13;
const NAME: &'static str = "log-meta";
type K = LogMetaKey;
type V = LogMetaValue;
}
/// Key-Value Types for storing meta data of a raft state machine in sled::Tree,
/// e.g. the last applied log id.
pub struct FSMMeta {}
impl SledKeySpace for FSMMeta {
const PREFIX: u8 = 3;
const NAME: &'static str = "sm-meta";
type K = FSMMetaKey;
type V = StateMachineMetaValue;
}
/// Key-Value Types for storing meta data of a raft in sled::Tree:
/// id: NodeId,
/// hard_state:
/// current_term,
/// voted_for,
pub struct RaftStateKV {}
impl SledKeySpace for RaftStateKV {
const PREFIX: u8 = 4;
const NAME: &'static str = "raft-state";
type K = RaftStateKey;
type V = RaftStateValue;
}
use anyhow::Context;
use detail::DeploymentDetail;
use diesel::connection::SimpleConnection;
use diesel::pg::PgConnection;
use diesel::prelude::*;
use diesel::r2d2::{ConnectionManager, PooledConnection};
use graph::components::store::{EntityType, StoredDynamicDataSource};
use graph::data::subgraph::status;
use graph::prelude::{
tokio, CancelHandle, CancelToken, CancelableError, PoolWaitStats, SubgraphDeploymentEntity,
};
use lru_time_cache::LruCache;
use rand::{seq::SliceRandom, thread_rng};
use std::borrow::Cow;
use std::collections::{BTreeMap, HashMap};
use std::convert::Into;
use std::convert::TryInto;
use std::env;
use std::iter::FromIterator;
use std::ops::Bound;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::{atomic::AtomicUsize, Arc, Mutex};
use std::time::Duration;
use std::time::Instant;
use graph::components::store::EntityCollection;
use graph::components::subgraph::ProofOfIndexingFinisher;
use graph::constraint_violation;
use graph::data::subgraph::schema::{SubgraphError, POI_OBJECT};
use graph::prelude::{
anyhow, debug, info, lazy_static, o, warn, web3, ApiSchema, AttributeNames, BlockNumber,
BlockPtr, CheapClone, DeploymentHash, DeploymentState, Entity, EntityKey, EntityModification,
EntityQuery, Error, Logger, QueryExecutionError, Schema, StopwatchMetrics, StoreError,
StoreEvent, Value, BLOCK_NUMBER_MAX,
};
use graph_graphql::prelude::api_schema;
use web3::types::Address;
use crate::block_range::block_number;
use crate::catalog;
use crate::deployment;
use crate::relational::{Layout, LayoutCache, SqlName, Table};
use crate::relational_queries::FromEntityData;
use crate::{connection_pool::ConnectionPool, detail};
use crate::{dynds, primary::Site};
lazy_static! {
/// `GRAPH_QUERY_STATS_REFRESH_INTERVAL` is how long statistics that
/// influence query execution are cached in memory (in seconds) before
/// they are reloaded from the database. Defaults to 300s (5 minutes).
static ref STATS_REFRESH_INTERVAL: Duration = {
env::var("GRAPH_QUERY_STATS_REFRESH_INTERVAL")
.ok()
.map(|s| {
let secs = u64::from_str(&s).unwrap_or_else(|_| {
panic!("GRAPH_QUERY_STATS_REFRESH_INTERVAL must be a number, but is `{}`", s)
});
Duration::from_secs(secs)
}).unwrap_or(Duration::from_secs(300))
};
}
/// When connected to read replicas, this allows choosing which DB server to use for an operation.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ReplicaId {
/// The main server has write and read access.
Main,
/// A read replica identified by its index.
ReadOnly(usize),
}
/// Commonly needed information about a subgraph that we cache in
/// `Store.subgraph_cache`. Only immutable subgraph data can be cached this
/// way as the cache lives for the lifetime of the `Store` object
#[derive(Clone)]
pub(crate) struct SubgraphInfo {
/// The schema as supplied by the user
pub(crate) input: Arc<Schema>,
/// The schema we derive from `input` with `graphql::schema::api::api_schema`
pub(crate) api: Arc<ApiSchema>,
/// The block number at which this subgraph was grafted onto
/// another one. We do not allow reverting past this block
pub(crate) graft_block: Option<BlockNumber>,
/// The deployment hash of the remote subgraph whose store
/// will be GraphQL queried, for debugging purposes.
pub(crate) debug_fork: Option<DeploymentHash>,
pub(crate) description: Option<String>,
pub(crate) repository: Option<String>,
}
pub struct StoreInner {
logger: Logger,
pool: ConnectionPool,
read_only_pools: Vec<ConnectionPool>,
/// A list of the available replicas set up such that when we run
/// through the list once, we picked each replica according to its
/// desired weight. Each replica can appear multiple times in the list
replica_order: Vec<ReplicaId>,
/// The current position in `replica_order` so we know which one to
/// pick next
conn_round_robin_counter: AtomicUsize,
/// A cache of commonly needed data about a subgraph.
subgraph_cache: Mutex<LruCache<DeploymentHash, SubgraphInfo>>,
/// A cache for the layout metadata for subgraphs. The Store just
/// hosts this because it lives long enough, but it is managed from
/// the entities module
pub(crate) layout_cache: LayoutCache,
}
/// Storage of the data for individual deployments. Each `DeploymentStore`
/// corresponds to one of the database shards that `SubgraphStore` manages.
#[derive(Clone)]
pub struct DeploymentStore(Arc<StoreInner>);
impl CheapClone for DeploymentStore {}
impl Deref for DeploymentStore {
type Target = StoreInner;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DeploymentStore {
pub fn new(
logger: &Logger,
pool: ConnectionPool,
read_only_pools: Vec<ConnectionPool>,
mut pool_weights: Vec<usize>,
) -> Self {
// Create a store-specific logger
let logger = logger.new(o!("component" => "Store"));
// Create a list of replicas with repetitions according to the weights
// and shuffle the resulting list. Any missing weights in the list
// default to 1
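// For example, with one read replica and `pool_weights == [2, 1]`, the
// shuffled list contains [Main, Main, ReadOnly(0)], so the main server is
// picked roughly twice as often as the replica.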
pool_weights.resize(read_only_pools.len() + 1, 1);
let mut replica_order: Vec<_> = pool_weights
.iter()
.enumerate()
.map(|(i, weight)| {
let replica = if i == 0 {
ReplicaId::Main
} else {
ReplicaId::ReadOnly(i - 1)
};
vec![replica; *weight]
})
.flatten()
.collect();
let mut rng = thread_rng();
replica_order.shuffle(&mut rng);
debug!(logger, "Using postgres host order {:?}", replica_order);
// Create the store
let store = StoreInner {
logger: logger.clone(),
pool,
read_only_pools,
replica_order,
conn_round_robin_counter: AtomicUsize::new(0),
subgraph_cache: Mutex::new(LruCache::with_capacity(100)),
layout_cache: LayoutCache::new(*STATS_REFRESH_INTERVAL),
};
DeploymentStore(Arc::new(store))
}
pub(crate) fn create_deployment(
&self,
schema: &Schema,
deployment: SubgraphDeploymentEntity,
site: Arc<Site>,
graft_base: Option<Arc<Layout>>,
replace: bool,
) -> Result<(), StoreError> {
let conn = self.get_conn()?;
conn.transaction(|| -> Result<_, StoreError> {
let exists = deployment::exists(&conn, &site)?;
// Create (or update) the metadata. Update only happens in tests
if replace || !exists {
deployment::create_deployment(
&conn,
&site,
deployment,
exists,
replace,
)?;
};
// Create the schema for the subgraph data
if !exists {
let query = format!("create schema {}", &site.namespace);
conn.batch_execute(&query)?;
let layout = Layout::create_relational_schema(&conn, site.clone(), schema)?;
// See if we are grafting and check that the graft is permissible
if let Some(base) = graft_base {
let errors = layout.can_copy_from(&base);
if !errors.is_empty() {
return Err(StoreError::Unknown(anyhow!(
"The subgraph `{}` cannot be used as the graft base \
for `{}` because the schemas are incompatible:\n - {}",
&base.catalog.site.namespace,
&layout.catalog.site.namespace,
errors.join("\n - ")
)));
}
}
}
Ok(())
})
}
pub(crate) fn load_deployment(
&self,
site: &Site,
) -> Result<SubgraphDeploymentEntity, StoreError> {
let conn = self.get_conn()?;
detail::deployment_entity(&conn, site)
}
// Remove the data and metadata for the deployment `site`. This operation
// is not reversible
pub(crate) fn drop_deployment(&self, site: &Site) -> Result<(), StoreError> {
let conn = self.get_conn()?;
conn.transaction(|| {
crate::deployment::drop_schema(&conn, &site.namespace)?;
crate::dynds::drop(&conn, &site.deployment)?;
crate::deployment::drop_metadata(&conn, site)
})
}
pub(crate) fn execute_query<T: FromEntityData>(
&self,
conn: &PgConnection,
site: Arc<Site>,
query: EntityQuery,
) -> Result<Vec<T>, QueryExecutionError> {
let layout = self.layout(conn, site)?;
let logger = query.logger.unwrap_or(self.logger.clone());
layout.query(
&logger,
conn,
query.collection,
query.filter,
query.order,
query.range,
query.block,
query.query_id,
)
}
fn check_interface_entity_uniqueness(
&self,
conn: &PgConnection,
layout: &Layout,
key: &EntityKey,
) -> Result<(), StoreError> {
assert_eq!(&key.subgraph_id, &layout.site.deployment);
// Collect all types that share an interface implementation with this
// entity type, and make sure there are no conflicting IDs.
//
// To understand why this is necessary, suppose that `Dog` and `Cat` are
// types and both implement an interface `Pet`, and both have instances
// with `id: "Fred"`. If a type `PetOwner` has a field `pets: [Pet]`
// then with the value `pets: ["Fred"]`, there's no way to disambiguate
// if that's Fred the Dog, Fred the Cat or both.
//
// This assumes that there are no concurrent writes to a subgraph.
let schema = self.subgraph_info_with_conn(&conn, &layout.site)?.api;
let types_for_interface = schema.types_for_interface();
let entity_type = key.entity_type.to_string();
let types_with_shared_interface = Vec::from_iter(
schema
.interfaces_for_type(&key.entity_type)
.into_iter()
.flatten()
.map(|interface| &types_for_interface[&interface.into()])
.flatten()
.map(EntityType::from)
.filter(|type_name| type_name != &key.entity_type),
);
if !types_with_shared_interface.is_empty() {
if let Some(conflicting_entity) =
layout.conflicting_entity(conn, &key.entity_id, types_with_shared_interface)?
{
return Err(StoreError::ConflictingId(
entity_type.clone(),
key.entity_id.clone(),
conflicting_entity,
));
}
}
Ok(())
}
fn apply_entity_modifications(
&self,
conn: &PgConnection,
layout: &Layout,
mods: &[EntityModification],
ptr: &BlockPtr,
stopwatch: StopwatchMetrics,
) -> Result<i32, StoreError> {
use EntityModification::*;
let mut count = 0;
// Group `Insert`s and `Overwrite`s by key, and accumulate `Remove`s.
let mut inserts = HashMap::new();
let mut overwrites = HashMap::new();
let mut removals = HashMap::new();
for modification in mods.into_iter() {
match modification {
Insert { key, data } => {
inserts
.entry(key.entity_type.clone())
.or_insert_with(Vec::new)
.push((key, Cow::from(data)));
}
Overwrite { key, data } => {
overwrites
.entry(key.entity_type.clone())
.or_insert_with(Vec::new)
.push((key, Cow::from(data)));
}
Remove { key } => {
removals
.entry(key.entity_type.clone())
.or_insert_with(Vec::new)
.push(key.entity_id.as_str());
}
}
}
// Apply modification groups.
// Inserts:
for (entity_type, mut entities) in inserts.into_iter() {
count +=
self.insert_entities(&entity_type, &mut entities, conn, layout, ptr, &stopwatch)?
as i32
}
// Overwrites:
for (entity_type, mut entities) in overwrites.into_iter() {
// we do not update the count since the number of entities remains the same
self.overwrite_entities(&entity_type, &mut entities, conn, layout, ptr, &stopwatch)?;
}
// Removals
for (entity_type, entity_keys) in removals.into_iter() {
count -= self.remove_entities(
&entity_type,
entity_keys.as_slice(),
conn,
layout,
ptr,
&stopwatch,
)? as i32;
}
Ok(count)
}
fn insert_entities<'a>(
&'a self,
entity_type: &'a EntityType,
data: &'a mut [(&'a EntityKey, Cow<'a, Entity>)],
conn: &PgConnection,
layout: &'a Layout,
ptr: &BlockPtr,
stopwatch: &StopwatchMetrics,
) -> Result<usize, StoreError> {
let section = stopwatch.start_section("check_interface_entity_uniqueness");
for (key, _) in data.iter() {
// WARNING: This will potentially execute 2 queries for each entity key.
self.check_interface_entity_uniqueness(conn, layout, key)?;
}
section.end();
let _section = stopwatch.start_section("apply_entity_modifications_insert");
layout.insert(conn, entity_type, data, block_number(ptr), stopwatch)
}
fn overwrite_entities<'a>(
&'a self,
entity_type: &'a EntityType,
data: &'a mut [(&'a EntityKey, Cow<'a, Entity>)],
conn: &PgConnection,
layout: &'a Layout,
ptr: &BlockPtr,
stopwatch: &StopwatchMetrics,
) -> Result<usize, StoreError> {
let section = stopwatch.start_section("check_interface_entity_uniqueness");
for (key, _) in data.iter() {
// WARNING: This will potentially execute 2 queries for each entity key.
self.check_interface_entity_uniqueness(conn, layout, key)?;
}
section.end();
let _section = stopwatch.start_section("apply_entity_modifications_update");
layout.update(conn, &entity_type, data, block_number(ptr), stopwatch)
}
fn remove_entities(
&self,
entity_type: &EntityType,
entity_keys: &[&str],
conn: &PgConnection,
layout: &Layout,
ptr: &BlockPtr,
stopwatch: &StopwatchMetrics,
) -> Result<usize, StoreError> {
let _section = stopwatch.start_section("apply_entity_modifications_delete");
layout
.delete(
conn,
entity_type,
&entity_keys,
block_number(ptr),
stopwatch,
)
.map_err(|_error| anyhow!("Failed to remove entities: {:?}", entity_keys).into())
}
/// Execute a closure with a connection to the database.
///
/// # API
/// The API of using a closure to bound the usage of the connection serves several
/// purposes:
///
/// * Moves blocking database access out of the `Future::poll`. Within
/// `Future::poll` (which includes all `async` methods) it is illegal to
/// perform a blocking operation. This includes all accesses to the
/// database, acquiring of locks, etc. Calling a blocking operation can
/// cause problems with `Future` combinators (including but not limited
/// to select, timeout, and FuturesUnordered) and problems with
/// executors/runtimes. This method moves the database work onto another
/// thread in a way which does not block `Future::poll`.
///
/// * Limit the total number of connections. Because the supplied closure
/// takes a reference, we know the scope of the usage of all entity
/// connections and can limit their use in a non-blocking way.
///
/// # Cancellation
/// The normal pattern for futures in Rust is drop to cancel. Once we
/// spawn the database work in a thread though, this expectation no longer
/// holds because the spawned task is the independent of this future. So,
/// this method provides a cancel token which indicates that the `Future`
/// has been dropped. This isn't *quite* as good as drop on cancel,
/// because a drop on cancel can do things like cancel http requests that
/// are in flight, but checking for cancel periodically is a significant
/// improvement.
///
/// The implementation of the supplied closure should check for cancel
/// between every operation that is potentially blocking. This includes
/// any method which may interact with the database. The check can be
/// conveniently written as `token.check_cancel()?;`. It is low overhead
/// to check for cancel, so when in doubt it is better to have too many
/// checks than too few.
///
/// # Panics:
/// * This task will panic if the supplied closure panics
/// * This task will panic if the supplied closure returns Err(Cancelled)
/// when the supplied cancel token is not cancelled.
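///
/// # Example
///
/// A hypothetical sketch (the closure body is illustrative only):
///
/// ```ignore
/// let n = store
///     .with_conn(|conn, cancel| {
///         cancel.check_cancel()?;
///         // ... run blocking diesel queries against `conn` ...
///         Ok(42)
///     })
///     .await?;
/// ```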
pub(crate) async fn with_conn<T: Send + 'static>(
&self,
f: impl 'static
+ Send
+ FnOnce(
&PooledConnection<ConnectionManager<PgConnection>>,
&CancelHandle,
) -> Result<T, CancelableError<StoreError>>,
) -> Result<T, StoreError> {
self.pool.with_conn(f).await
}
/// Deprecated. Use `with_conn` instead.
fn get_conn(&self) -> Result<PooledConnection<ConnectionManager<PgConnection>>, StoreError> {
self.pool.get()
}
/// Panics if `idx` is not a valid index for a read only pool.
fn read_only_conn(
&self,
idx: usize,
) -> Result<PooledConnection<ConnectionManager<PgConnection>>, Error> {
self.read_only_pools[idx].get().map_err(Error::from)
}
pub(crate) fn get_replica_conn(
&self,
replica: ReplicaId,
) -> Result<PooledConnection<ConnectionManager<PgConnection>>, Error> {
let conn = match replica {
ReplicaId::Main => self.get_conn()?,
ReplicaId::ReadOnly(idx) => self.read_only_conn(idx)?,
};
Ok(conn)
}
pub(crate) async fn query_permit(
&self,
replica: ReplicaId,
) -> tokio::sync::OwnedSemaphorePermit {
let pool = match replica {
ReplicaId::Main => &self.pool,
ReplicaId::ReadOnly(idx) => &self.read_only_pools[idx],
};
pool.query_permit().await
}
pub(crate) fn wait_stats(&self, replica: ReplicaId) -> PoolWaitStats {
match replica {
ReplicaId::Main => self.pool.wait_stats(),
ReplicaId::ReadOnly(idx) => self.read_only_pools[idx].wait_stats(),
}
}
/// Return the layout for a deployment. Since constructing a `Layout`
/// object takes a bit of computation, we cache layout objects that do
/// not have a pending migration in the Store, i.e., for the lifetime of
/// the Store. Layout objects with a pending migration can not be
/// cached for longer than a transaction since they might change
/// without us knowing
pub(crate) fn layout(
&self,
conn: &PgConnection,
site: Arc<Site>,
) -> Result<Arc<Layout>, StoreError> {
self.layout_cache.get(&self.logger, conn, site)
}
/// Return the layout for a deployment. This might use a database
/// connection for the lookup and should only be called if the caller
/// does not have a connection currently. If it does, use `layout`
pub(crate) fn find_layout(&self, site: Arc<Site>) -> Result<Arc<Layout>, StoreError> {
if let Some(layout) = self.layout_cache.find(site.as_ref()) {
return Ok(layout.clone());
}
let conn = self.get_conn()?;
self.layout(&conn, site)
}
fn subgraph_info_with_conn(
&self,
conn: &PgConnection,
site: &Site,
) -> Result<SubgraphInfo, StoreError> {
if let Some(info) = self.subgraph_cache.lock().unwrap().get(&site.deployment) {
return Ok(info.clone());
}
let (input_schema, description, repository) = deployment::manifest_info(&conn, site)?;
let graft_block =
deployment::graft_point(&conn, &site.deployment)?.map(|(_, ptr)| ptr.number as i32);
let debug_fork = deployment::debug_fork(&conn, &site.deployment)?;
// Generate an API schema for the subgraph and make sure all types in the
// API schema have a @subgraphId directive as well
let mut schema = input_schema.clone();
schema.document =
api_schema(&schema.document).map_err(|e| StoreError::Unknown(e.into()))?;
schema.add_subgraph_id_directives(site.deployment.clone());
let info = SubgraphInfo {
input: Arc::new(input_schema),
api: Arc::new(ApiSchema::from_api_schema(schema)?),
graft_block,
debug_fork,
description,
repository,
};
// Insert the schema into the cache.
let mut cache = self.subgraph_cache.lock().unwrap();
cache.insert(site.deployment.clone(), info);
Ok(cache.get(&site.deployment).unwrap().clone())
}
pub(crate) fn subgraph_info(&self, site: &Site) -> Result<SubgraphInfo, StoreError> {
if let Some(info) = self.subgraph_cache.lock().unwrap().get(&site.deployment) {
return Ok(info.clone());
}
let conn = self.get_conn()?;
self.subgraph_info_with_conn(&conn, site)
}
fn block_ptr_with_conn(
subgraph_id: &DeploymentHash,
conn: &PgConnection,
) -> Result<Option<BlockPtr>, StoreError> {
deployment::block_ptr(&conn, subgraph_id)
}
pub(crate) fn deployment_details(
&self,
ids: Vec<String>,
) -> Result<Vec<DeploymentDetail>, StoreError> {
let conn = self.get_conn()?;
conn.transaction(|| -> Result<_, StoreError> { detail::deployment_details(&conn, ids) })
}
pub(crate) fn deployment_statuses(
&self,
sites: &Vec<Arc<Site>>,
) -> Result<Vec<status::Info>, StoreError> {
let conn = self.get_conn()?;
conn.transaction(|| -> Result<Vec<status::Info>, StoreError> {
detail::deployment_statuses(&conn, sites)
})
}
pub(crate) fn deployment_exists_and_synced(
&self,
id: &DeploymentHash,
) -> Result<bool, StoreError> {
let conn = self.get_conn()?;
deployment::exists_and_synced(&conn, id.as_str())
}
pub(crate) fn deployment_synced(&self, id: &DeploymentHash) -> Result<(), StoreError> {
let conn = self.get_conn()?;
conn.transaction(|| deployment::set_synced(&conn, id))
}
// Only used for tests
#[cfg(debug_assertions)]
pub(crate) fn drop_deployment_schema(
&self,
namespace: &crate::primary::Namespace,
) -> Result<(), StoreError> {
let conn = self.get_conn()?;
deployment::drop_schema(&conn, namespace)
}
// Only used for tests
#[cfg(debug_assertions)]
pub(crate) fn drop_all_metadata(&self) -> Result<(), StoreError> {
// Delete metadata entities in each shard
// This needs to touch all the tables in the subgraphs schema
const QUERY: &str = "
delete from subgraphs.dynamic_ethereum_contract_data_source;
delete from subgraphs.subgraph;
delete from subgraphs.subgraph_deployment;
delete from subgraphs.subgraph_deployment_assignment;
delete from subgraphs.subgraph_version;
delete from subgraphs.subgraph_manifest;
delete from subgraphs.copy_table_state;
delete from subgraphs.copy_state;
delete from active_copies;
";
let conn = self.get_conn()?;
conn.batch_execute(QUERY)?;
conn.batch_execute("delete from deployment_schemas;")?;
Ok(())
}
pub(crate) async fn vacuum(&self) -> Result<(), StoreError> {
self.with_conn(|conn, _| {
conn.batch_execute("vacuum (analyze) subgraphs.subgraph_deployment")?;
Ok(())
})
.await
}
/// Runs the SQL `ANALYZE` command in a table.
pub(crate) async fn analyze(
&self,
site: Arc<Site>,
entity_name: &str,
) -> Result<(), StoreError> {
let store = self.clone();
let entity_name = entity_name.to_owned();
self.with_conn(move |conn, _| {
let layout = store.layout(conn, site)?;
let table = resolve_table_name(&layout, &entity_name)?;
let table_name = &table.qualified_name;
let sql = format!("analyze {table_name}");
conn.execute(&sql)?;
Ok(())
})
.await
}
/// Creates a new index in the specified Entity table if it doesn't already exist.
///
/// This is a potentially time-consuming operation.
pub(crate) async fn create_manual_index(
&self,
site: Arc<Site>,
entity_name: &str,
field_names: Vec<String>,
index_method: String,
) -> Result<(), StoreError> {
let store = self.clone();
let entity_name = entity_name.to_owned();
self.with_conn(move |conn, _| {
let schema_name = site.namespace.clone();
let layout = store.layout(conn, site)?;
let table = resolve_table_name(&layout, &entity_name)?;
let column_names = resolve_column_names(table, &field_names)?;
let column_names_sep_by_underscores = column_names.join("_");
let column_names_sep_by_commas = column_names.join(", ");
let table_name = &table.name;
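// For example, an entity table `token` with fields ["owner", "id"] yields
// the index name `manual_token_owner_id` (illustrative; actual names come
// from the resolved SQL table and column names).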
let index_name = format!("manual_{table_name}_{column_names_sep_by_underscores}");
let sql = format!(
"create index concurrently if not exists {index_name} \
on {schema_name}.{table_name} using {index_method} \
({column_names_sep_by_commas})"
);
// This might take a long time.
conn.execute(&sql)?;
// Check whether the index creation was successful.
let index_is_valid =
catalog::check_index_is_valid(conn, schema_name.as_str(), &index_name)?;
if index_is_valid {
Ok(())
} else {
// Index creation failed. We should drop the index before returning.
let drop_index_sql =
format!("drop index concurrently if exists {schema_name}.{index_name}");
conn.execute(&drop_index_sql)?;
Err(StoreError::Canceled)
}
.map_err(Into::into)
})
.await
}
/// Returns a list of all existing indexes for the specified Entity table.
pub(crate) async fn indexes_for_entity(
&self,
site: Arc<Site>,
entity_name: &str,
) -> Result<Vec<String>, StoreError> {
let store = self.clone();
let entity_name = entity_name.to_owned();
self.with_conn(move |conn, _| {
let schema_name = site.namespace.clone();
let layout = store.layout(conn, site)?;
let table = resolve_table_name(&layout, &entity_name)?;
let table_name = &table.name;
catalog::indexes_for_table(conn, schema_name.as_str(), table_name.as_str())
.map_err(Into::into)
})
.await
}
/// Drops an index for a given deployment, concurrently.
pub(crate) async fn drop_index(
&self,
site: Arc<Site>,
index_name: &str,
) -> Result<(), StoreError> {
let index_name = String::from(index_name);
self.with_conn(move |conn, _| {
let schema_name = site.namespace.clone();
catalog::drop_index(conn, schema_name.as_str(), &index_name).map_err(Into::into)
})
.await
}
}
/// Methods that back the trait `graph::components::Store`, but have small
/// variations in their signatures
impl DeploymentStore {
pub(crate) fn block_ptr(&self, site: &Site) -> Result<Option<BlockPtr>, StoreError> {
let conn = self.get_conn()?;
Self::block_ptr_with_conn(&site.deployment, &conn)
}
pub(crate) fn block_cursor(&self, site: &Site) -> Result<Option<String>, StoreError> {
let conn = self.get_conn()?;
Ok(deployment::get_subgraph_firehose_cursor(
&conn,
&site.deployment,
)?)
}
pub(crate) async fn supports_proof_of_indexing<'a>(
&self,
site: Arc<Site>,
) -> Result<bool, StoreError> {
let store = self.clone();
self.with_conn(move |conn, cancel| {
cancel.check_cancel()?;
let layout = store.layout(conn, site)?;
Ok(layout.supports_proof_of_indexing())
})
.await
.map_err(Into::into)
}
pub(crate) async fn get_proof_of_indexing(
&self,
site: Arc<Site>,
indexer: &Option<Address>,
block: BlockPtr,
) -> Result<Option<[u8; 32]>, StoreError> {
let indexer = *indexer;
let site3 = site.clone();
let site4 = site.clone();
let store = self.clone();
let block2 = block.clone();
let entities = self
.with_conn(move |conn, cancel| {
cancel.check_cancel()?;
let layout = store.layout(conn, site4.clone())?;
if !layout.supports_proof_of_indexing() {
return Ok(None);
}
conn.transaction::<_, CancelableError<anyhow::Error>, _>(move || {
let latest_block_ptr = match Self::block_ptr_with_conn(&site.deployment, conn)?
{
Some(inner) => inner,
None => return Ok(None),
};
cancel.check_cancel()?;
// FIXME: (Determinism)
//
// It is vital to ensure that the block hash given in the query
// is a parent of the latest block indexed for the subgraph.
// Unfortunately the machinery needed to do this is not yet in place.
// The best we can do right now is just to make sure that the block number
// is high enough.
if latest_block_ptr.number < block.number {
return Ok(None);
}
let query = EntityQuery::new(
site4.deployment.clone(),
block.number.try_into().unwrap(),
EntityCollection::All(vec![(
POI_OBJECT.cheap_clone(),
AttributeNames::All,
)]),
);
let entities = store
.execute_query::<Entity>(conn, site4, query)
.map_err(anyhow::Error::from)?;
Ok(Some(entities))
})
.map_err(Into::into)
})
.await?;
let entities = if let Some(entities) = entities {
entities
} else {
return Ok(None);
};
let mut by_causality_region = entities
.into_iter()
.map(|e| {
let causality_region = e.id()?;
let digest = match e.get("digest") {
Some(Value::Bytes(b)) => Ok(b.to_owned()),
other => Err(anyhow::anyhow!(
"Entity has non-bytes digest attribute: {:?}",
other
)),
}?;
Ok((causality_region, digest))
})
.collect::<Result<HashMap<_, _>, anyhow::Error>>()?;
let mut finisher = ProofOfIndexingFinisher::new(&block2, &site3.deployment, &indexer);
for (name, region) in by_causality_region.drain() {
finisher.add_causality_region(&name, &region);
}
Ok(Some(finisher.finish()))
}
pub(crate) fn get(
&self,
site: Arc<Site>,
key: &EntityKey,
) -> Result<Option<Entity>, StoreError> {
let conn = self.get_conn()?;
let layout = self.layout(&conn, site)?;
// We should really have callers pass in a block number; but until
// that is fully plumbed in, we just use the biggest possible block
// number so that we will always return the latest version,
// i.e., the one with an infinite upper bound
layout.find(&conn, &key.entity_type, &key.entity_id, BLOCK_NUMBER_MAX)
}
pub(crate) fn get_many(
&self,
site: Arc<Site>,
ids_for_type: &BTreeMap<&EntityType, Vec<&str>>,
) -> Result<BTreeMap<EntityType, Vec<Entity>>, StoreError> {
if ids_for_type.is_empty() {
return Ok(BTreeMap::new());
}
let conn = self.get_conn()?;
let layout = self.layout(&conn, site)?;
layout.find_many(&conn, ids_for_type, BLOCK_NUMBER_MAX)
}
// Only used by tests
#[cfg(debug_assertions)]
pub(crate) fn find(
&self,
site: Arc<Site>,
query: EntityQuery,
) -> Result<Vec<Entity>, QueryExecutionError> {
let conn = self.get_conn()?;
self.execute_query(&conn, site, query)
}
pub(crate) fn transact_block_operations(
&self,
site: Arc<Site>,
block_ptr_to: &BlockPtr,
firehose_cursor: Option<&str>,
mods: &[EntityModification],
stopwatch: StopwatchMetrics,
data_sources: &[StoredDynamicDataSource],
deterministic_errors: &[SubgraphError],
) -> Result<StoreEvent, StoreError> {
// All operations should apply only to data or metadata for this subgraph
if mods
.iter()
.map(|modification| modification.entity_key())
.any(|key| key.subgraph_id != site.deployment)
{
panic!(
"transact_block_operations must affect only entities \
in the subgraph or in the subgraph of subgraphs"
);
}
let conn = {
let _section = stopwatch.start_section("transact_blocks_get_conn");
self.get_conn()?
};
let event = conn.transaction(|| -> Result<_, StoreError> {
// Emit a store event for the changes we are about to make. We
// wait with sending it until we have done all our other work
// so that we do not hold a lock on the notification queue
// for longer than we have to
let event: StoreEvent = mods.iter().collect();
// Make the changes
let layout = self.layout(&conn, site.clone())?;
let section = stopwatch.start_section("apply_entity_modifications");
let count = self.apply_entity_modifications(
&conn,
layout.as_ref(),
mods,
                block_ptr_to,
stopwatch,
)?;
deployment::update_entity_count(
&conn,
site.as_ref(),
layout.count_query.as_str(),
count,
)?;
section.end();
            dynds::insert(&conn, &site.deployment, data_sources, block_ptr_to)?;
if !deterministic_errors.is_empty() {
deployment::insert_subgraph_errors(
&conn,
&site.deployment,
deterministic_errors,
block_ptr_to.block_number(),
)?;
}
deployment::forward_block_ptr(&conn, &site.deployment, block_ptr_to)?;
if let Some(cursor) = firehose_cursor {
if cursor != "" {
deployment::update_firehose_cursor(&conn, &site.deployment, cursor)?;
}
}
Ok(event)
})?;
Ok(event)
}
fn rewind_with_conn(
&self,
conn: &PgConnection,
site: Arc<Site>,
block_ptr_to: BlockPtr,
firehose_cursor: Option<&str>,
) -> Result<StoreEvent, StoreError> {
let event = conn.transaction(|| -> Result<_, StoreError> {
// Don't revert past a graft point
let info = self.subgraph_info_with_conn(&conn, site.as_ref())?;
if let Some(graft_block) = info.graft_block {
if graft_block > block_ptr_to.number {
return Err(anyhow!(
"Can not revert subgraph `{}` to block {} as it was \
grafted at block {} and reverting past a graft point \
is not possible",
site.deployment.clone(),
block_ptr_to.number,
graft_block
)
.into());
}
}
deployment::revert_block_ptr(&conn, &site.deployment, block_ptr_to.clone())?;
if let Some(cursor) = firehose_cursor {
deployment::update_firehose_cursor(&conn, &site.deployment, cursor)
.context("updating firehose cursor")?;
}
// Revert the data
let layout = self.layout(&conn, site.clone())?;
// At 1 block per 15 seconds, the maximum i32
// value affords just over 1020 years of blocks.
let block: BlockNumber = block_ptr_to
.number
.try_into()
.expect("block numbers fit into an i32");
// The revert functions want the number of the first block that we need to get rid of
let block = block + 1;
let (event, count) = layout.revert_block(&conn, &site.deployment, block)?;
// Revert the meta data changes that correspond to this subgraph.
// Only certain meta data changes need to be reverted, most
// importantly creation of dynamic data sources. We ensure in the
// rest of the code that we only record history for those meta data
// changes that might need to be reverted
Layout::revert_metadata(&conn, &site.deployment, block)?;
deployment::update_entity_count(
&conn,
site.as_ref(),
layout.count_query.as_str(),
count,
)?;
Ok(event)
})?;
Ok(event)
}
pub(crate) fn rewind(
&self,
site: Arc<Site>,
block_ptr_to: BlockPtr,
) -> Result<StoreEvent, StoreError> {
let conn = self.get_conn()?;
// Unwrap: If we are reverting then the block ptr is not `None`.
let block_ptr_from = Self::block_ptr_with_conn(&site.deployment, &conn)?.unwrap();
// Sanity check on block numbers
if block_ptr_from.number <= block_ptr_to.number {
            return Err(constraint_violation!(
                "rewind must go backwards, but would go from block {} to block {}",
                block_ptr_from.number,
                block_ptr_to.number
            ));
}
// When rewinding, we reset the firehose cursor to the empty string. That way, on resume,
        // Firehose will start from the block_ptr instead (with a sanity check to ensure it
        // resumes at the exact block).
self.rewind_with_conn(&conn, site, block_ptr_to, Some(""))
}
pub(crate) fn revert_block_operations(
&self,
site: Arc<Site>,
block_ptr_to: BlockPtr,
firehose_cursor: Option<&str>,
) -> Result<StoreEvent, StoreError> {
let conn = self.get_conn()?;
// Unwrap: If we are reverting then the block ptr is not `None`.
let block_ptr_from = Self::block_ptr_with_conn(&site.deployment, &conn)?.unwrap();
// Sanity check on block numbers
if block_ptr_from.number != block_ptr_to.number + 1 {
panic!("revert_block_operations must revert a single block only");
}
self.rewind_with_conn(&conn, site, block_ptr_to, firehose_cursor)
}
pub(crate) async fn deployment_state_from_id(
&self,
id: DeploymentHash,
) -> Result<DeploymentState, StoreError> {
self.with_conn(|conn, _| deployment::state(&conn, id).map_err(|e| e.into()))
.await
}
pub(crate) async fn fail_subgraph(
&self,
id: DeploymentHash,
error: SubgraphError,
) -> Result<(), StoreError> {
self.with_conn(move |conn, _| {
conn.transaction(|| deployment::fail(&conn, &id, &error))
.map_err(Into::into)
})
.await?;
Ok(())
}
pub(crate) fn replica_for_query(
&self,
for_subscription: bool,
) -> Result<ReplicaId, StoreError> {
use std::sync::atomic::Ordering;
let replica_id = match for_subscription {
// Pick a weighted ReplicaId. `replica_order` contains a list of
// replicas with repetitions according to their weight
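            // Illustrative example (weights are hypothetical): with replicas
            // A (weight 2) and B (weight 1), `replica_order` would hold
            // [A, A, B], so queries are routed A, A, B, A, A, B, ... as the
            // shared round-robin counter advances.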
false => {
let weights_count = self.replica_order.len();
let index =
self.conn_round_robin_counter.fetch_add(1, Ordering::SeqCst) % weights_count;
*self.replica_order.get(index).unwrap()
}
// Subscriptions always go to the main replica.
true => ReplicaId::Main,
};
Ok(replica_id)
}
pub(crate) async fn load_dynamic_data_sources(
&self,
id: DeploymentHash,
) -> Result<Vec<StoredDynamicDataSource>, StoreError> {
self.with_conn(move |conn, _| {
conn.transaction(|| crate::dynds::load(&conn, id.as_str()))
.map_err(Into::into)
})
.await
}
pub(crate) async fn exists_and_synced(&self, id: DeploymentHash) -> Result<bool, StoreError> {
self.with_conn(move |conn, _| {
conn.transaction(|| deployment::exists_and_synced(&conn, &id))
.map_err(Into::into)
})
.await
}
pub(crate) fn graft_pending(
&self,
id: &DeploymentHash,
) -> Result<Option<(DeploymentHash, BlockPtr)>, StoreError> {
let conn = self.get_conn()?;
deployment::graft_pending(&conn, id)
}
/// Bring the subgraph into a state where we can start or resume
/// indexing.
///
/// If `graft_src` is `Some(..)`, copy data from that subgraph. It
/// should only be `Some(..)` if we know we still need to copy data. The
/// code is idempotent so that a copy process that has been interrupted
/// can be resumed seamlessly, but the code sets the block pointer back
    /// to the graft point, so that calling this needlessly with `Some(..)`
/// will remove any progress that might have been made since the last
/// time the deployment was started.
pub(crate) fn start_subgraph(
&self,
logger: &Logger,
site: Arc<Site>,
graft_src: Option<(Arc<Layout>, BlockPtr)>,
) -> Result<(), StoreError> {
let dst = self.find_layout(site)?;
// Do any cleanup to bring the subgraph into a known good state
if let Some((src, block)) = graft_src {
info!(
logger,
"Initializing graft by copying data from {} to {}",
src.catalog.site.namespace,
dst.catalog.site.namespace
);
// Copy subgraph data
// We allow both not copying tables at all from the source, as well
// as adding new tables in `self`; we only need to check that tables
// that actually need to be copied from the source are compatible
// with the corresponding tables in `self`
let copy_conn = crate::copy::Connection::new(
logger,
self.pool.clone(),
src.clone(),
dst.clone(),
block.clone(),
)?;
let status = copy_conn.copy_data()?;
if status == crate::copy::Status::Cancelled {
return Err(StoreError::Canceled);
}
let conn = self.get_conn()?;
conn.transaction(|| -> Result<(), StoreError> {
// Copy dynamic data sources and adjust their ID
let start = Instant::now();
let count = dynds::copy(&conn, &src.site, &dst.site, &block)?;
info!(logger, "Copied {} dynamic data sources", count;
"time_ms" => start.elapsed().as_millis());
// Copy errors across
let start = Instant::now();
let count = deployment::copy_errors(&conn, &src.site, &dst.site, &block)?;
info!(logger, "Copied {} existing errors", count;
"time_ms" => start.elapsed().as_millis());
catalog::copy_account_like(&conn, &src.site, &dst.site)?;
// Rewind the subgraph so that entity versions that are
// clamped in the future (beyond `block`) become valid for
// all blocks after `block`. `revert_block` gets rid of
// everything including the block passed to it. We want to
// preserve `block` and therefore revert `block+1`
let start = Instant::now();
let block_to_revert: BlockNumber = (block.number + 1)
.try_into()
.expect("block numbers fit into an i32");
dst.revert_block(&conn, &dst.site.deployment, block_to_revert)?;
info!(logger, "Rewound subgraph to block {}", block.number;
"time_ms" => start.elapsed().as_millis());
let start = Instant::now();
deployment::set_entity_count(&conn, &dst.site, &dst.count_query)?;
info!(logger, "Counted the entities";
"time_ms" => start.elapsed().as_millis());
// Set the block ptr to the graft point to signal that we successfully
// performed the graft
crate::deployment::forward_block_ptr(&conn, &dst.site.deployment, &block)?;
info!(logger, "Subgraph successfully initialized";
"time_ms" => start.elapsed().as_millis());
Ok(())
})?;
}
Ok(())
}
// If the current block of the deployment is the same as the fatal error,
    // we revert all block operations to its parent/previous block.
//
// This should be called once per subgraph on `graph-node` initialization,
// before processing the first block on start.
//
// It will do nothing (early return) if:
//
// - There's no fatal error for the subgraph
// - The error is NOT deterministic
pub(crate) fn unfail_deterministic_error(
&self,
site: Arc<Site>,
current_ptr: &BlockPtr,
parent_ptr: &BlockPtr,
) -> Result<(), StoreError> {
let conn = &self.get_conn()?;
let deployment_id = &site.deployment;
conn.transaction(|| {
// We'll only unfail subgraphs that had fatal errors
let subgraph_error = match detail::fatal_error(conn, deployment_id)? {
Some(fatal_error) => fatal_error,
// If the subgraph is not failed then there is nothing to do.
None => return Ok(()),
};
// Confidence check
if !subgraph_error.deterministic {
return Ok(()); // Nothing to do
}
use deployment::SubgraphHealth::*;
// Decide status based on if there are any errors for the previous/parent block
let prev_health =
if deployment::has_non_fatal_errors(conn, deployment_id, Some(parent_ptr.number))? {
Unhealthy
} else {
Healthy
};
match &subgraph_error.block_hash {
// The error happened for the current deployment head.
// We should revert everything (deployment head, subgraph errors, etc)
// to the previous/parent hash/block.
Some(bytes) if bytes == current_ptr.hash.as_slice() => {
info!(
self.logger,
"Reverting errored block";
"subgraph_id" => deployment_id,
"from_block_number" => format!("{}", current_ptr.number),
"from_block_hash" => format!("{}", current_ptr.hash),
"to_block_number" => format!("{}", parent_ptr.number),
"to_block_hash" => format!("{}", parent_ptr.hash),
);
                // We ignore the StoreEvent that's returned; we won't use it.
//
// We reset the firehose cursor to the empty string. That way, on resume,
// Firehose will start from the block_ptr instead (with sanity checks to ensure it's resuming
// at the correct block).
let _ = self.revert_block_operations(site.clone(), parent_ptr.clone(), Some(""))?;
// Unfail the deployment.
deployment::update_deployment_status(conn, deployment_id, prev_health, None)?;
}
// Found error, but not for deployment head, we don't need to
// revert the block operations.
//
// If you find this warning in the logs, something is wrong, this
            // shouldn't happen.
Some(hash_bytes) => {
warn!(self.logger, "Subgraph error does not have same block hash as deployment head";
"subgraph_id" => deployment_id,
"error_id" => &subgraph_error.id,
"error_block_hash" => format!("0x{}", hex::encode(&hash_bytes)),
"deployment_head" => format!("{}", current_ptr.hash),
);
}
// Same as branch above, if you find this warning in the logs,
// something is wrong, this shouldn't happen.
None => {
warn!(self.logger, "Subgraph error should have block hash";
"subgraph_id" => deployment_id,
"error_id" => &subgraph_error.id,
);
}
};
Ok(())
})
}
// If a non-deterministic error happens and the deployment head advances,
// we should unfail the subgraph (status: Healthy, failed: false) and delete
// the error itself.
//
// This should be called after successfully processing a block for a subgraph.
//
// It will do nothing (early return) if:
//
// - There's no fatal error for the subgraph
// - The error IS deterministic
pub(crate) fn unfail_non_deterministic_error(
&self,
site: Arc<Site>,
current_ptr: &BlockPtr,
) -> Result<(), StoreError> {
let conn = &self.get_conn()?;
let deployment_id = &site.deployment;
conn.transaction(|| {
// We'll only unfail subgraphs that had fatal errors
let subgraph_error = match detail::fatal_error(conn, deployment_id)? {
Some(fatal_error) => fatal_error,
// If the subgraph is not failed then there is nothing to do.
None => return Ok(()),
};
// Confidence check
if subgraph_error.deterministic {
return Ok(()); // Nothing to do
}
match subgraph_error.block_range {
// Deployment head (current_ptr) advanced more than the error.
// That means it's healthy, and the non-deterministic error got
// solved (didn't happen on another try).
(Bound::Included(error_block_number), _)
if current_ptr.number >= error_block_number =>
{
info!(
self.logger,
"Unfailing the deployment status";
"subgraph_id" => deployment_id,
);
// Unfail the deployment.
deployment::update_deployment_status(
conn,
deployment_id,
deployment::SubgraphHealth::Healthy,
None,
)?;
// Delete the fatal error.
deployment::delete_error(conn, &subgraph_error.id)?;
Ok(())
}
// NOOP, the deployment head is still before where non-deterministic error happened.
block_range => {
info!(
self.logger,
"Subgraph error is still ahead of deployment head, nothing to unfail";
"subgraph_id" => deployment_id,
"block_number" => format!("{}", current_ptr.number),
"block_hash" => format!("{}", current_ptr.hash),
"error_block_range" => format!("{:?}", block_range),
"error_block_hash" => subgraph_error.block_hash.as_ref().map(|hash| format!("0x{}", hex::encode(hash))),
);
Ok(())
}
}
})
}
#[cfg(debug_assertions)]
pub fn error_count(&self, id: &DeploymentHash) -> Result<usize, StoreError> {
let conn = self.get_conn()?;
deployment::error_count(&conn, id)
}
pub(crate) async fn mirror_primary_tables(&self, logger: &Logger) {
self.pool.mirror_primary_tables().await.unwrap_or_else(|e| {
warn!(logger, "Mirroring primary tables failed. We will try again in a few minutes";
"error" => e.to_string(),
"shard" => self.pool.shard.as_str())
});
}
pub(crate) async fn health(
&self,
id: &DeploymentHash,
) -> Result<deployment::SubgraphHealth, StoreError> {
let id = id.clone();
self.with_conn(move |conn, _| deployment::health(&conn, &id).map_err(Into::into))
.await
}
}
/// Tries to fetch a [`Table`] either by its Entity name or its SQL name.
///
/// Since we allow our input to be either camel-case or snake-case, we must retry the
/// search using the latter if the search for the former fails.
fn resolve_table_name<'a>(layout: &'a Layout, name: &'_ str) -> Result<&'a Table, StoreError> {
layout
.table_for_entity(&EntityType::new(name.to_owned()))
.map(Deref::deref)
.or_else(|_error| {
let sql_name = SqlName::from(name);
layout
.table(&sql_name)
.ok_or_else(|| StoreError::UnknownTable(name.to_owned()))
})
}
// Resolves column names.
//
// Since we allow our input to be either camel-case or snake-case, we must retry the
// search using the latter if the search for the former fails.
fn resolve_column_names<'a, T: AsRef<str>>(
table: &'a Table,
field_names: &[T],
) -> Result<Vec<&'a str>, StoreError> {
field_names
.iter()
.map(|f| {
table
.column_for_field(f.as_ref())
.or_else(|_error| {
let sql_name = SqlName::from(f.as_ref());
table
.column(&sql_name)
.ok_or_else(|| StoreError::UnknownField(f.as_ref().to_string()))
})
.map(|column| column.name.as_str())
})
.collect()
}
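// Illustrative resolution (entity and field names are hypothetical): given an
// entity type `TokenHolder` with a GraphQL field `accountBalance`, stored in
// the table `token_holder` with column `account_balance`, both
// `resolve_table_name(layout, "TokenHolder")` and
// `resolve_table_name(layout, "token_holder")` locate the same table, and
// passing the snake-case "account_balance" to `resolve_column_names` succeeds
// via the SQL-name fallback after the field lookup fails.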
| 37.786043 | 128 | 0.557468 |
26b50a4285d5d01be893fa2d443dd6feb2aeefd3 | 67 | //! This module holds Discord handlers for the bot.
pub mod ping;
| 16.75 | 51 | 0.731343 |
6991ffdcae1f0d965dcfc1964c3f838d9a260f82 | 1,273 | // Copyright 2016 Joe Wilm, The Alacritty Project Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Alacritty - The GPU Enhanced Terminal
#![deny(clippy::all, clippy::if_not_else, clippy::enum_glob_use, clippy::wrong_pub_self_convention)]
#![cfg_attr(feature = "nightly", feature(core_intrinsics))]
#![cfg_attr(all(test, feature = "bench"), feature(test))]
#[cfg(target_os = "macos")]
#[macro_use]
extern crate objc;
pub mod ansi;
pub mod clipboard;
pub mod config;
pub mod event;
pub mod event_loop;
pub mod grid;
pub mod index;
pub mod locale;
pub mod message_bar;
pub mod meter;
pub mod panic;
pub mod selection;
pub mod sync;
pub mod term;
pub mod tty;
pub mod util;
pub mod vi_mode;
pub use crate::grid::Grid;
pub use crate::term::Term;
| 28.931818 | 100 | 0.739984 |
1669ad495e85e4f0b182fadec3106f535268a205 | 5,903 | use std::{
env, fs,
path::{Path, PathBuf},
sync::Arc,
};
use casper_execution_engine::{
core::engine_state::{EngineConfig, EngineState},
storage::{
global_state::lmdb::LmdbGlobalState, transaction_source::lmdb::LmdbEnvironment,
trie_store::lmdb::LmdbTrieStore,
},
};
use casper_hashing::Digest;
use casper_node::{
storage::Storage,
types::{Deploy, DeployHash},
StorageConfig, WithDir,
};
use crate::DEFAULT_MAX_READERS;
use casper_types::ProtocolVersion;
use lmdb::DatabaseFlags;
/// Gets many deploys by hash.
pub fn get_many_deploys_by_hash(
storage: &Storage,
hashes: &[DeployHash],
) -> Result<Vec<Deploy>, anyhow::Error> {
let mut deploys = vec![];
for deploy_hash in hashes {
let deploy = match storage.read_deploy_by_hash(*deploy_hash)? {
None => {
return Err(anyhow::anyhow!(
"Deploy is present in block but hasn't been downloaded."
))
}
Some(deploy) => deploy,
};
deploys.push(deploy);
}
Ok(deploys)
}
/// Create an lmdb environment at a given path.
fn create_lmdb_environment(
lmdb_path: impl AsRef<Path>,
default_max_db_size: usize,
manual_sync_enabled: bool,
) -> Result<Arc<LmdbEnvironment>, anyhow::Error> {
let lmdb_environment = Arc::new(LmdbEnvironment::new(
&lmdb_path,
default_max_db_size,
DEFAULT_MAX_READERS,
manual_sync_enabled,
)?);
Ok(lmdb_environment)
}
/// Loads an existing execution engine.
pub fn load_execution_engine(
ee_lmdb_path: impl AsRef<Path>,
default_max_db_size: usize,
state_root_hash: Digest,
manual_sync_enabled: bool,
) -> Result<(Arc<EngineState<LmdbGlobalState>>, Arc<LmdbEnvironment>), anyhow::Error> {
let lmdb_data_file = ee_lmdb_path.as_ref().join("data.lmdb");
    if !lmdb_data_file.exists() {
return Err(anyhow::anyhow!(
"lmdb data file not found at: {}",
lmdb_data_file.display()
));
}
let lmdb_environment =
create_lmdb_environment(&ee_lmdb_path, default_max_db_size, manual_sync_enabled)?;
let lmdb_trie_store = Arc::new(LmdbTrieStore::open(&lmdb_environment, None)?);
let global_state = LmdbGlobalState::new(
Arc::clone(&lmdb_environment),
lmdb_trie_store,
state_root_hash,
);
Ok((
Arc::new(EngineState::new(global_state, EngineConfig::default())),
lmdb_environment,
))
}
/// Creates a new execution engine.
pub fn create_execution_engine(
ee_lmdb_path: impl AsRef<Path>,
default_max_db_size: usize,
manual_sync_enabled: bool,
) -> Result<(Arc<EngineState<LmdbGlobalState>>, Arc<LmdbEnvironment>), anyhow::Error> {
if !ee_lmdb_path.as_ref().exists() {
println!(
"creating new lmdb data dir {}",
ee_lmdb_path.as_ref().display()
);
fs::create_dir_all(&ee_lmdb_path)?;
}
let lmdb_environment =
create_lmdb_environment(&ee_lmdb_path, default_max_db_size, manual_sync_enabled)?;
lmdb_environment.env().sync(true)?;
let lmdb_trie_store = Arc::new(LmdbTrieStore::new(
&lmdb_environment,
None,
DatabaseFlags::empty(),
)?);
let global_state = LmdbGlobalState::empty(Arc::clone(&lmdb_environment), lmdb_trie_store)?;
Ok((
Arc::new(EngineState::new(global_state, EngineConfig::default())),
lmdb_environment,
))
}
pub fn normalize_path(path: impl AsRef<Path>) -> Result<PathBuf, anyhow::Error> {
let path = path.as_ref();
if path.is_absolute() {
Ok(path.into())
} else {
Ok(env::current_dir()?.join(path))
}
}
pub fn create_storage(chain_download_path: impl AsRef<Path>) -> Result<Storage, anyhow::Error> {
let chain_download_path = normalize_path(chain_download_path)?;
let mut storage_config = StorageConfig::default();
storage_config.path = chain_download_path.clone();
Ok(Storage::new(
&WithDir::new(chain_download_path, storage_config),
None,
ProtocolVersion::from_parts(0, 0, 0),
false,
"test",
)?)
}
#[cfg(test)]
mod tests {
use crate::{
get_block_by_identifier, put_block_with_deploys, storage::create_storage, BlockWithDeploys,
};
use casper_node::rpcs::{
chain::{BlockIdentifier, GetBlockResult},
docs::DocExample,
info::GetDeployResult,
};
#[test]
fn block_with_deploys_round_trip_lmdb() {
let dir = tempfile::tempdir().unwrap().into_path();
let mut storage = create_storage(dir).expect("should create storage");
let example_deploy = GetDeployResult::doc_example().deploy.clone();
let example_block = GetBlockResult::doc_example()
.block
.as_ref()
.unwrap()
.clone();
let block_with_deploys = BlockWithDeploys {
block: example_block.clone(),
transfers: vec![example_deploy.clone()],
deploys: vec![],
};
put_block_with_deploys(&mut storage, &block_with_deploys).unwrap();
let stored_block =
get_block_by_identifier(&storage, &BlockIdentifier::Hash(example_block.hash));
assert!(matches!(stored_block, Ok(Some(ref _block))));
let stored_block_by_height = get_block_by_identifier(
&storage,
&BlockIdentifier::Height(example_block.header.height),
);
        assert!(matches!(stored_block_by_height, Ok(Some(ref _block))));
assert_eq!(
stored_block.unwrap().unwrap(),
stored_block_by_height.unwrap().unwrap()
);
let stored_deploy = storage.read_deploy_by_hash(*example_deploy.id());
assert!(matches!(stored_deploy, Ok(Some(_deploy))));
}
}
| 31.068421 | 99 | 0.637642 |
cc03ac38943045d23e307050a5559c34843aebab | 201 | pub mod num {
pub trait Num2 {
        fn from_int2(n: isize) -> Self;
}
}
pub mod float {
    impl crate::num::Num2 for f64 {
        fn from_int2(n: isize) -> f64 { n as f64 }
}
}
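// Usage sketch for the modernized trait above:
// let three: f64 = <f64 as crate::num::Num2>::from_int2(3); // three == 3.0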
| 16.75 | 61 | 0.517413 |
912ac73464043a3e4c43076dae0f67ac37dbc7f2 | 4,552 | #[doc = "Reader of register HC1_SPLT"]
pub type R = crate::R<u32, super::HC1_SPLT>;
#[doc = "Writer for register HC1_SPLT"]
pub type W = crate::W<u32, super::HC1_SPLT>;
#[doc = "Register HC1_SPLT `reset()`'s with value 0"]
impl crate::ResetValue for super::HC1_SPLT {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `PRTADDR`"]
pub type PRTADDR_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PRTADDR`"]
pub struct PRTADDR_W<'a> {
w: &'a mut W,
}
impl<'a> PRTADDR_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x7f) | ((value as u32) & 0x7f);
self.w
}
}
#[doc = "Reader of field `HUBADDR`"]
pub type HUBADDR_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `HUBADDR`"]
pub struct HUBADDR_W<'a> {
w: &'a mut W,
}
impl<'a> HUBADDR_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x7f << 7)) | (((value as u32) & 0x7f) << 7);
self.w
}
}
#[doc = "Reader of field `XACTPOS`"]
pub type XACTPOS_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `XACTPOS`"]
pub struct XACTPOS_W<'a> {
w: &'a mut W,
}
impl<'a> XACTPOS_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 14)) | (((value as u32) & 0x03) << 14);
self.w
}
}
#[doc = "Reader of field `COMPSPLT`"]
pub type COMPSPLT_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `COMPSPLT`"]
pub struct COMPSPLT_W<'a> {
w: &'a mut W,
}
impl<'a> COMPSPLT_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
#[doc = "Reader of field `SPLTENA`"]
pub type SPLTENA_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SPLTENA`"]
pub struct SPLTENA_W<'a> {
w: &'a mut W,
}
impl<'a> SPLTENA_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31);
self.w
}
}
impl R {
#[doc = "Bits 0:6 - Port Address"]
#[inline(always)]
pub fn prtaddr(&self) -> PRTADDR_R {
PRTADDR_R::new((self.bits & 0x7f) as u8)
}
#[doc = "Bits 7:13 - Hub Address"]
#[inline(always)]
pub fn hubaddr(&self) -> HUBADDR_R {
HUBADDR_R::new(((self.bits >> 7) & 0x7f) as u8)
}
#[doc = "Bits 14:15 - Transaction Position"]
#[inline(always)]
pub fn xactpos(&self) -> XACTPOS_R {
XACTPOS_R::new(((self.bits >> 14) & 0x03) as u8)
}
#[doc = "Bit 16 - Do Complete Split"]
#[inline(always)]
pub fn compsplt(&self) -> COMPSPLT_R {
COMPSPLT_R::new(((self.bits >> 16) & 0x01) != 0)
}
#[doc = "Bit 31 - Split Enable"]
#[inline(always)]
pub fn spltena(&self) -> SPLTENA_R {
SPLTENA_R::new(((self.bits >> 31) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 0:6 - Port Address"]
#[inline(always)]
pub fn prtaddr(&mut self) -> PRTADDR_W {
PRTADDR_W { w: self }
}
#[doc = "Bits 7:13 - Hub Address"]
#[inline(always)]
pub fn hubaddr(&mut self) -> HUBADDR_W {
HUBADDR_W { w: self }
}
#[doc = "Bits 14:15 - Transaction Position"]
#[inline(always)]
pub fn xactpos(&mut self) -> XACTPOS_W {
XACTPOS_W { w: self }
}
#[doc = "Bit 16 - Do Complete Split"]
#[inline(always)]
pub fn compsplt(&mut self) -> COMPSPLT_W {
COMPSPLT_W { w: self }
}
#[doc = "Bit 31 - Split Enable"]
#[inline(always)]
pub fn spltena(&mut self) -> SPLTENA_W {
SPLTENA_W { w: self }
}
}
| 28.993631 | 86 | 0.545914 |
fc4de69a7ee8007740c75e4bf66e250afe6d2b82 | 1,784 | use criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};
use rand::{thread_rng, Rng};
use storage_proofs::drgraph::{new_seed, Graph, BASE_DEGREE};
use storage_proofs::hasher::blake2s::Blake2sHasher;
use storage_proofs::hasher::pedersen::PedersenHasher;
use storage_proofs::stacked::{StackedBucketGraph, EXP_DEGREE};
fn merkle_benchmark(c: &mut Criterion) {
#[cfg(feature = "big-sector-sizes-bench")]
let params = vec![128, 1024, 1048576];
#[cfg(not(feature = "big-sector-sizes-bench"))]
let params = vec![128, 1024];
c.bench(
"merkletree",
ParameterizedBenchmark::new(
"blake2s",
move |b, n_nodes| {
let mut rng = thread_rng();
let data: Vec<u8> = (0..32 * *n_nodes).map(|_| rng.gen()).collect();
let graph = StackedBucketGraph::<Blake2sHasher>::new_stacked(
*n_nodes,
BASE_DEGREE,
EXP_DEGREE,
new_seed(),
)
.unwrap();
b.iter(|| black_box(graph.merkle_tree(&data).unwrap()))
},
params,
)
.with_function("pedersen", move |b, n_nodes| {
let mut rng = thread_rng();
let data: Vec<u8> = (0..32 * *n_nodes).map(|_| rng.gen()).collect();
let graph = StackedBucketGraph::<PedersenHasher>::new_stacked(
*n_nodes,
BASE_DEGREE,
EXP_DEGREE,
new_seed(),
)
.unwrap();
b.iter(|| black_box(graph.merkle_tree(&data).unwrap()))
})
.sample_size(20),
);
}
criterion_group!(benches, merkle_benchmark);
criterion_main!(benches);
| 34.307692 | 95 | 0.548206 |
038834b92c1648a934b56eec73337c1d36c8c5f2 | 892 | #[doc(hidden)]
#[macro_export]
macro_rules! _parse_unary_op {
(-, $($t:tt)+) => (_impl_unary_op_internal!(Neg, neg, $($t)+););
(!, $($t:tt)+) => (_impl_unary_op_internal!(Not, not, $($t)+););
}
#[doc(hidden)]
#[macro_export]
macro_rules! _impl_unary_op_internal {
($ops_trait:ident, $ops_fn:ident, &$lhs:ty, $out:ty, $lhs_i:ident, $body:block) => (
impl<'a> ops::$ops_trait for &'a $lhs {
type Output = $out;
fn $ops_fn(self) -> Self::Output {
let $lhs_i = self;
$body
}
}
);
($ops_trait:ident, $ops_fn:ident, $lhs:ty, $out:ty, $lhs_i:ident, $body:block) => (
impl ops::$ops_trait for $lhs {
type Output = $out;
fn $ops_fn(self) -> Self::Output {
let $lhs_i = self;
$body
}
}
);
}
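// Usage sketch (the `Flag` wrapper is hypothetical, and `std::ops` is assumed
// to be in scope as `ops`):
//
//     struct Flag(bool);
//     _parse_unary_op!(!, Flag, Flag, v, { Flag(!v.0) });
//
// This expands to an `impl ops::Not for Flag` whose `not` method binds
// `self` to `v` before evaluating the block; the `&$lhs` arm produces the
// same impl for `&'a Flag`.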
| 27.875 | 96 | 0.475336 |
6a7ed588182518812229e8d6125e24332bdab54e | 2,651 | use libp2p::gossipsub::Topic;
use serde_derive::{Deserialize, Serialize};
/// The gossipsub topic names.
// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX
// For example /eth2/beacon_block/ssz
pub const TOPIC_PREFIX: &str = "eth2";
pub const TOPIC_ENCODING_POSTFIX: &str = "ssz";
pub const BEACON_BLOCK_TOPIC: &str = "beacon_block";
pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation";
pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit";
pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing";
pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing";
pub const SHARD_TOPIC_PREFIX: &str = "shard";
/// Enum that brings these topics into the rust type system.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum GossipTopic {
BeaconBlock,
BeaconAttestation,
VoluntaryExit,
ProposerSlashing,
AttesterSlashing,
Shard,
Unknown(String),
}
impl From<&str> for GossipTopic {
fn from(topic: &str) -> GossipTopic {
let topic_parts: Vec<&str> = topic.split('/').collect();
if topic_parts.len() == 4
&& topic_parts[1] == TOPIC_PREFIX
&& topic_parts[3] == TOPIC_ENCODING_POSTFIX
{
match topic_parts[2] {
BEACON_BLOCK_TOPIC => GossipTopic::BeaconBlock,
BEACON_ATTESTATION_TOPIC => GossipTopic::BeaconAttestation,
VOLUNTARY_EXIT_TOPIC => GossipTopic::VoluntaryExit,
PROPOSER_SLASHING_TOPIC => GossipTopic::ProposerSlashing,
ATTESTER_SLASHING_TOPIC => GossipTopic::AttesterSlashing,
unknown_topic => GossipTopic::Unknown(unknown_topic.into()),
}
} else {
GossipTopic::Unknown(topic.into())
}
}
}
impl Into<Topic> for GossipTopic {
fn into(self) -> Topic {
Topic::new(self.into())
}
}
impl Into<String> for GossipTopic {
fn into(self) -> String {
match self {
GossipTopic::BeaconBlock => topic_builder(BEACON_BLOCK_TOPIC),
GossipTopic::BeaconAttestation => topic_builder(BEACON_ATTESTATION_TOPIC),
GossipTopic::VoluntaryExit => topic_builder(VOLUNTARY_EXIT_TOPIC),
GossipTopic::ProposerSlashing => topic_builder(PROPOSER_SLASHING_TOPIC),
GossipTopic::AttesterSlashing => topic_builder(ATTESTER_SLASHING_TOPIC),
GossipTopic::Shard => topic_builder(SHARD_TOPIC_PREFIX),
GossipTopic::Unknown(topic) => topic,
}
}
}
fn topic_builder(topic: &'static str) -> String {
format!("/{}/{}/{}", TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX,)
}
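// Illustrative round trip: `GossipTopic::from("/eth2/beacon_block/ssz")`
// yields `GossipTopic::BeaconBlock`, and converting that variant back into a
// `String` rebuilds the same "/eth2/beacon_block/ssz"; anything not matching
// the /eth2/<topic>/ssz shape is preserved verbatim in `GossipTopic::Unknown`.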
| 36.819444 | 86 | 0.667295 |
281a9e86bbf9edaae5c185508da32261ea4f1229 | 1,128 | use cpython::*;
use failure::Fail;
py_exception!(fast_stat, StatisticsError);
#[derive(Fail, Debug)]
crate enum MyError {
#[fail(display = "harmonic_mean requires at least one data point")]
HarmonicNoDataPoints,
#[fail(display = "harmonic mean does not support negative values")]
HarmonicNegatives,
#[fail(
display = "no unique mode; found {} equally common values",
modes
)]
NoUniqueMode { modes: usize },
#[fail(display = "no mode for empty data")]
NoModeEmptyData,
#[fail(display = "no median for empty data")]
NoMedianEmptyData,
#[fail(display = "variance requires at least two data points")]
NoEnoughDataForVariance,
#[fail(display = "population variance requires at least one data point")]
NoEnoughDataForPopulationVariance,
#[fail(display = "mean requires at least one data point")]
NoEnoughDataForMean,
}
#[inline]
crate fn to_python_result<T>(py: Python<'_>, res: Result<T, MyError>) -> PyResult<T> {
match res {
Err(err) => Err(PyErr::new::<StatisticsError, _>(py, format!("{}", err))),
Ok(x) => Ok(x),
}
}
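// Usage sketch (the `mean` helper returning Result<f64, MyError> is
// hypothetical): a wrapper would surface `MyError` to Python callers as a
// `fast_stat.StatisticsError`:
//
//     crate fn py_mean(py: Python<'_>, data: Vec<f64>) -> PyResult<f64> {
//         to_python_result(py, mean(&data))
//     }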
| 31.333333 | 86 | 0.655142 |
aca4d176724ff793156e6e73d4392d05a800498c | 14,712 | use crate::utils::span_lint;
use itertools::Itertools;
use pulldown_cmark;
use rustc::hir;
use rustc::lint::{LateContext, LateLintPass, LintArray, LintPass};
use rustc::{declare_tool_lint, impl_lint_pass};
use rustc_data_structures::fx::FxHashSet;
use std::ops::Range;
use syntax::ast::{AttrKind, Attribute};
use syntax::source_map::{BytePos, Span};
use syntax_pos::Pos;
use url::Url;
declare_clippy_lint! {
/// **What it does:** Checks for the presence of `_`, `::` or camel-case words
/// outside ticks in documentation.
///
/// **Why is this bad?** *Rustdoc* supports markdown formatting, `_`, `::` and
/// camel-case probably indicates some code which should be included between
/// ticks. `_` can also be used for emphasis in markdown, this lint tries to
/// consider that.
///
    /// **Known problems:** Lots of bad docs won’t be fixed; what the lint checks
/// for is limited, and there are still false positives.
///
/// **Examples:**
/// ```rust
/// /// Do something with the foo_bar parameter. See also
/// /// that::other::module::foo.
/// // ^ `foo_bar` and `that::other::module::foo` should be ticked.
/// fn doit(foo_bar: usize) {}
/// ```
pub DOC_MARKDOWN,
pedantic,
"presence of `_`, `::` or camel-case outside backticks in documentation"
}
declare_clippy_lint! {
/// **What it does:** Checks for the doc comments of publicly visible
/// unsafe functions and warns if there is no `# Safety` section.
///
/// **Why is this bad?** Unsafe functions should document their safety
/// preconditions, so that users can be sure they are using them safely.
///
/// **Known problems:** None.
///
/// **Examples**:
/// ```rust
///# type Universe = ();
/// /// This function should really be documented
/// pub unsafe fn start_apocalypse(u: &mut Universe) {
/// unimplemented!();
/// }
/// ```
///
/// At least write a line about safety:
///
/// ```rust
///# type Universe = ();
/// /// # Safety
/// ///
/// /// This function should not be called before the horsemen are ready.
/// pub unsafe fn start_apocalypse(u: &mut Universe) {
/// unimplemented!();
/// }
/// ```
pub MISSING_SAFETY_DOC,
style,
"`pub unsafe fn` without `# Safety` docs"
}
declare_clippy_lint! {
/// **What it does:** Checks for `fn main() { .. }` in doctests
///
/// **Why is this bad?** The test can be shorter (and likely more readable)
/// if the `fn main()` is left implicit.
///
/// **Known problems:** None.
///
/// **Examples:**
/// ``````rust
/// /// An example of a doctest with a `main()` function
/// ///
/// /// # Examples
/// ///
/// /// ```
/// /// fn main() {
/// /// // this needs not be in an `fn`
/// /// }
/// /// ```
/// fn needless_main() {
/// unimplemented!();
/// }
/// ``````
pub NEEDLESS_DOCTEST_MAIN,
style,
"presence of `fn main() {` in code examples"
}
#[allow(clippy::module_name_repetitions)]
#[derive(Clone)]
pub struct DocMarkdown {
valid_idents: FxHashSet<String>,
in_trait_impl: bool,
}
impl DocMarkdown {
pub fn new(valid_idents: FxHashSet<String>) -> Self {
Self {
valid_idents,
in_trait_impl: false,
}
}
}
impl_lint_pass!(DocMarkdown => [DOC_MARKDOWN, MISSING_SAFETY_DOC, NEEDLESS_DOCTEST_MAIN]);
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for DocMarkdown {
fn check_crate(&mut self, cx: &LateContext<'a, 'tcx>, krate: &'tcx hir::Crate) {
check_attrs(cx, &self.valid_idents, &krate.attrs);
}
fn check_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx hir::Item) {
if check_attrs(cx, &self.valid_idents, &item.attrs) {
return;
}
// no safety header
match item.kind {
hir::ItemKind::Fn(_, ref header, ..) => {
if cx.access_levels.is_exported(item.hir_id) && header.unsafety == hir::Unsafety::Unsafe {
span_lint(
cx,
MISSING_SAFETY_DOC,
item.span,
"unsafe function's docs miss `# Safety` section",
);
}
},
hir::ItemKind::Impl(_, _, _, _, ref trait_ref, ..) => {
self.in_trait_impl = trait_ref.is_some();
},
_ => {},
}
}
fn check_item_post(&mut self, _cx: &LateContext<'a, 'tcx>, item: &'tcx hir::Item) {
if let hir::ItemKind::Impl(..) = item.kind {
self.in_trait_impl = false;
}
}
fn check_trait_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx hir::TraitItem) {
if check_attrs(cx, &self.valid_idents, &item.attrs) {
return;
}
// no safety header
if let hir::TraitItemKind::Method(ref sig, ..) = item.kind {
if cx.access_levels.is_exported(item.hir_id) && sig.header.unsafety == hir::Unsafety::Unsafe {
span_lint(
cx,
MISSING_SAFETY_DOC,
item.span,
"unsafe function's docs miss `# Safety` section",
);
}
}
}
fn check_impl_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx hir::ImplItem) {
if check_attrs(cx, &self.valid_idents, &item.attrs) || self.in_trait_impl {
return;
}
// no safety header
if let hir::ImplItemKind::Method(ref sig, ..) = item.kind {
if cx.access_levels.is_exported(item.hir_id) && sig.header.unsafety == hir::Unsafety::Unsafe {
span_lint(
cx,
MISSING_SAFETY_DOC,
item.span,
"unsafe function's docs miss `# Safety` section",
);
}
}
}
}
/// Cleans up documentation decoration (`///` and such).
///
/// We can't use `syntax::attr::AttributeMethods::with_desugared_doc` or
/// `syntax::parse::lexer::comments::strip_doc_comment_decoration` because we
/// need to keep track of the spans, but this function is inspired by the
/// latter.
#[allow(clippy::cast_possible_truncation)]
#[must_use]
pub fn strip_doc_comment_decoration(comment: &str, span: Span) -> (String, Vec<(usize, Span)>) {
// one-line comments lose their prefix
const ONELINERS: &[&str] = &["///!", "///", "//!", "//"];
for prefix in ONELINERS {
if comment.starts_with(*prefix) {
let doc = &comment[prefix.len()..];
let mut doc = doc.to_owned();
doc.push('\n');
return (
doc.to_owned(),
vec![(doc.len(), span.with_lo(span.lo() + BytePos(prefix.len() as u32)))],
);
}
}
if comment.starts_with("/*") {
let doc = &comment[3..comment.len() - 2];
let mut sizes = vec![];
let mut contains_initial_stars = false;
for line in doc.lines() {
let offset = line.as_ptr() as usize - comment.as_ptr() as usize;
debug_assert_eq!(offset as u32 as usize, offset);
contains_initial_stars |= line.trim_start().starts_with('*');
// +1 for the newline
sizes.push((line.len() + 1, span.with_lo(span.lo() + BytePos(offset as u32))));
}
if !contains_initial_stars {
return (doc.to_string(), sizes);
}
// remove the initial '*'s if any
let mut no_stars = String::with_capacity(doc.len());
for line in doc.lines() {
let mut chars = line.chars();
while let Some(c) = chars.next() {
if c.is_whitespace() {
no_stars.push(c);
} else {
no_stars.push(if c == '*' { ' ' } else { c });
break;
}
}
no_stars.push_str(chars.as_str());
no_stars.push('\n');
}
return (no_stars, sizes);
}
panic!("not a doc-comment: {}", comment);
}
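// Illustrative behavior: `strip_doc_comment_decoration("/// foo", span)`
// returns (" foo\n", ...) with the span narrowed past the "///" prefix; for
// `/** ... */` blocks, any leading '*' column is blanked out line by line so
// offsets into the returned string still map back into the original span.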
pub fn check_attrs<'a>(cx: &LateContext<'_, '_>, valid_idents: &FxHashSet<String>, attrs: &'a [Attribute]) -> bool {
let mut doc = String::new();
let mut spans = vec![];
for attr in attrs {
if let AttrKind::DocComment(ref comment) = attr.kind {
let comment = comment.to_string();
let (comment, current_spans) = strip_doc_comment_decoration(&comment, attr.span);
spans.extend_from_slice(¤t_spans);
doc.push_str(&comment);
} else if attr.check_name(sym!(doc)) {
// ignore mix of sugared and non-sugared doc
return true; // don't trigger the safety check
}
}
let mut current = 0;
for &mut (ref mut offset, _) in &mut spans {
let offset_copy = *offset;
*offset = current;
current += offset_copy;
}
if doc.is_empty() {
return false;
}
let parser = pulldown_cmark::Parser::new(&doc).into_offset_iter();
// Iterate over all `Events` and combine consecutive events into one
let events = parser.coalesce(|previous, current| {
use pulldown_cmark::Event::*;
let previous_range = previous.1;
let current_range = current.1;
match (previous.0, current.0) {
(Text(previous), Text(current)) => {
let mut previous = previous.to_string();
previous.push_str(¤t);
Ok((Text(previous.into()), previous_range))
},
(previous, current) => Err(((previous, previous_range), (current, current_range))),
}
});
check_doc(cx, valid_idents, events, &spans)
}
fn check_doc<'a, Events: Iterator<Item = (pulldown_cmark::Event<'a>, Range<usize>)>>(
cx: &LateContext<'_, '_>,
valid_idents: &FxHashSet<String>,
events: Events,
spans: &[(usize, Span)],
) -> bool {
// true if a safety header was found
use pulldown_cmark::Event::*;
use pulldown_cmark::Tag::*;
let mut safety_header = false;
let mut in_code = false;
let mut in_link = None;
let mut in_heading = false;
for (event, range) in events {
match event {
Start(CodeBlock(_)) => in_code = true,
End(CodeBlock(_)) => in_code = false,
Start(Link(_, url, _)) => in_link = Some(url),
End(Link(..)) => in_link = None,
Start(Heading(_)) => in_heading = true,
End(Heading(_)) => in_heading = false,
Start(_tag) | End(_tag) => (), // We don't care about other tags
Html(_html) => (), // HTML is weird, just ignore it
SoftBreak | HardBreak | TaskListMarker(_) | Code(_) | Rule => (),
FootnoteReference(text) | Text(text) => {
if Some(&text) == in_link.as_ref() {
// Probably a link of the form `<http://example.com>`
// Which are represented as a link to "http://example.com" with
// text "http://example.com" by pulldown-cmark
continue;
}
safety_header |= in_heading && text.trim() == "Safety";
let index = match spans.binary_search_by(|c| c.0.cmp(&range.start)) {
Ok(o) => o,
Err(e) => e - 1,
};
let (begin, span) = spans[index];
if in_code {
check_code(cx, &text, span);
} else {
// Adjust for the beginning of the current `Event`
let span = span.with_lo(span.lo() + BytePos::from_usize(range.start - begin));
check_text(cx, valid_idents, &text, span);
}
},
}
}
safety_header
}
fn check_code(cx: &LateContext<'_, '_>, text: &str, span: Span) {
if text.contains("fn main() {") {
span_lint(cx, NEEDLESS_DOCTEST_MAIN, span, "needless `fn main` in doctest");
}
}
fn check_text(cx: &LateContext<'_, '_>, valid_idents: &FxHashSet<String>, text: &str, span: Span) {
for word in text.split(|c: char| c.is_whitespace() || c == '\'') {
// Trim punctuation as in `some comment (see foo::bar).`
// ^^
// Or even as in `_foo bar_` which is emphasized.
let word = word.trim_matches(|c: char| !c.is_alphanumeric());
if valid_idents.contains(word) {
continue;
}
// Adjust for the current word
let offset = word.as_ptr() as usize - text.as_ptr() as usize;
let span = Span::new(
span.lo() + BytePos::from_usize(offset),
span.lo() + BytePos::from_usize(offset + word.len()),
span.ctxt(),
);
check_word(cx, word, span);
}
}
fn check_word(cx: &LateContext<'_, '_>, word: &str, span: Span) {
/// Checks if a string is camel-case, i.e., contains at least two uppercase
/// letters (`Clippy` is ok) and one lower-case letter (`NASA` is ok).
/// Plurals are also excluded (`IDs` is ok).
fn is_camel_case(s: &str) -> bool {
if s.starts_with(|c: char| c.is_digit(10)) {
return false;
}
let s = if s.ends_with('s') { &s[..s.len() - 1] } else { s };
s.chars().all(char::is_alphanumeric)
&& s.chars().filter(|&c| c.is_uppercase()).take(2).count() > 1
&& s.chars().filter(|&c| c.is_lowercase()).take(1).count() > 0
}
fn has_underscore(s: &str) -> bool {
s != "_" && !s.contains("\\_") && s.contains('_')
}
fn has_hyphen(s: &str) -> bool {
s != "-" && s.contains('-')
}
if let Ok(url) = Url::parse(word) {
// try to get around the fact that `foo::bar` parses as a valid URL
if !url.cannot_be_a_base() {
span_lint(
cx,
DOC_MARKDOWN,
span,
"you should put bare URLs between `<`/`>` or make a proper Markdown link",
);
return;
}
}
    // We assume that mixed-case words are not meant to be put inside backticks. (Issue #2343)
if has_underscore(word) && has_hyphen(word) {
return;
}
if has_underscore(word) || word.contains("::") || is_camel_case(word) {
span_lint(
cx,
DOC_MARKDOWN,
span,
&format!("you should put `{}` between ticks in the documentation", word),
);
}
}
| 34.535211 | 116 | 0.530315 |
4ba6eec12e97e46a29208ea4e0f063dfd3ed748c | 5,075 | extern crate walkdir;
#[cfg(windows)]
extern crate winres;
#[cfg(windows)]
extern crate cc;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate toml;
use walkdir::WalkDir;
use std::env;
use std::path::PathBuf;
use std::fs::copy;
use std::fs::create_dir_all;
use std::fs::File;
use std::io::BufRead;
use std::io::BufReader;
use std::io::Read;
use std::io::Write;
use std::env::consts::OS;
const FILES_TO_PREPROCESS: &'static [&'static str] = &["helpers.js", "views.js"];
/// Describes the application itself.
#[derive(Debug, Deserialize)]
pub struct BaseAttributes {
pub name: String,
pub target_url: String,
}
#[cfg(windows)]
fn handle_binary(config: &BaseAttributes) {
let mut res = winres::WindowsResource::new();
res.set_icon("static/favicon.ico");
res.set(
"FileDescription",
&format!("Interactive installer for {}", config.name),
);
res.set("ProductName", &format!("{} installer", config.name));
res.set(
"OriginalFilename",
&format!("{}_installer.exe", config.name),
);
res.compile().expect("Failed to generate metadata");
cc::Build::new()
.cpp(true)
.file("src/native/interop.cpp")
.compile("interop");
}
#[cfg(not(windows))]
fn handle_binary(_config: &BaseAttributes) {}
fn main() {
let output_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
let os = OS.to_lowercase();
// Find target config
let target_config = PathBuf::from(format!("bootstrap.{}.toml", os));
if !target_config.exists() {
panic!(
"There is no config file specified for the platform: {:?}. \
Create a file named \"bootstrap.{}.toml\" in the root directory.",
os, os
);
}
// Read in the config for our own purposes
let file_contents = {
let mut file = File::open(&target_config).expect("Unable to open config file");
let mut buf = Vec::new();
file.read_to_end(&mut buf)
.expect("Unable to read config file contents");
buf
};
let config: BaseAttributes =
toml::from_slice(&file_contents).expect("Unable to parse config file");
handle_binary(&config);
// Copy for the main build
copy(&target_config, output_dir.join("bootstrap.toml")).expect("Unable to copy config file");
// Copy files from static/ to build dir
for entry in WalkDir::new("static") {
let entry = entry.expect("Unable to read output directory");
let output_file = output_dir.join(entry.path());
if entry.path().is_dir() {
create_dir_all(output_file).expect("Unable to create dir");
} else {
let filename = entry
.path()
.file_name()
.expect("Unable to parse filename")
.to_str()
.expect("Unable to convert to string");
if FILES_TO_PREPROCESS.contains(&filename) {
// Do basic preprocessing - transcribe template string
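                // Illustrative rewrite (input is hypothetical): the JS lines
                //     let s = `hello
                //     world`;
                // become
                //     let s = "hello" +
                //     "world";
                // i.e. backticks turn into quotes, continuation lines gain a
                // leading quote and a trailing `" +`, and embedded quotes are
                // escaped, presumably because the embedded web view cannot be
                // assumed to support ES6 template literals.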
let source = BufReader::new(File::open(entry.path()).expect("Unable to copy file"));
let mut target = File::create(output_file).expect("Unable to copy file");
let mut is_template_string = false;
for line in source.lines() {
let line = line.expect("Unable to read line from JS file");
let mut is_break = false;
let mut is_quote = false;
let mut output_line = String::new();
if is_template_string {
output_line += "\"";
}
for c in line.chars() {
if c == '\\' {
is_break = true;
output_line.push('\\');
continue;
}
if (c == '\"' || c == '\'') && !is_break && !is_template_string {
is_quote = !is_quote;
}
if c == '`' && !is_break && !is_quote {
output_line += "\"";
is_template_string = !is_template_string;
continue;
}
if c == '"' && !is_break && is_template_string {
output_line += "\\\"";
continue;
}
is_break = false;
output_line.push(c);
}
if is_template_string {
output_line += "\" +";
}
output_line.push('\n');
                    target
                        .write_all(output_line.as_bytes())
                        .expect("Unable to write line");
}
} else {
copy(entry.path(), output_file).expect("Unable to copy file");
}
}
}
}
| 29.505814 | 100 | 0.504236 |
f5bb5df3fa953f3aabaf22f8faf3a51f03a28cf9 | 998 | use pyo3::prelude::*;
/// Returns hexadecimal value of given RGB tuple. `r`, `g`, and `b` must be
/// in the range 0 - 255.
#[pyfunction]
fn rgb_to_hex(red: u8, green: u8, blue: u8) -> PyResult<u32> {
Ok(((red as u32) << 16) | ((green as u32) << 8) | blue as u32)
}
/// Returns a tuple `(r, g, b)` of the RGB integer values equivalent to the
/// given RGB hexadecimal value. `r`, `g`, and `b` are in the range 0 - 255.
#[pyfunction]
fn hex_to_rgb(hex: u32) -> PyResult<(u8, u8, u8)> {
let red: u8 = ((hex >> 16) & 0xff) as u8;
let green: u8 = ((hex >> 8) & 0xff) as u8;
let blue: u8 = (hex & 0xff) as u8;
Ok((red, green, blue))
}
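// Round-trip check: rgb_to_hex(0x12, 0x34, 0x56) == Ok(0x123456) and
// hex_to_rgb(0x123456) == Ok((0x12, 0x34, 0x56)).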
/// This module provides functions for converting between the hexadecimal format
/// used by autopy methods and other more readable formats (e.g., RGB tuples).
#[pymodule(color)]
fn init(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_wrapped(wrap_pyfunction!(rgb_to_hex))?;
m.add_wrapped(wrap_pyfunction!(hex_to_rgb))?;
Ok(())
}
| 35.642857 | 80 | 0.625251 |
16698c643d20bb923468eb1c6ded0c0657e1859f | 4,540 | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::perf_context_metrics::{
APPLY_PERF_CONTEXT_TIME_HISTOGRAM_STATIC, STORE_PERF_CONTEXT_TIME_HISTOGRAM_STATIC,
};
use crate::{
raw_util, set_perf_flags, set_perf_level, PerfContext as RawPerfContext, PerfFlag, PerfFlags,
};
use engine_traits::{PerfContextKind, PerfLevel};
use lazy_static::lazy_static;
#[macro_export]
macro_rules! report_perf_context {
($ctx: expr, $metric: ident) => {
if $ctx.perf_level != PerfLevel::Disable {
let perf_context = RawPerfContext::get();
let pre_and_post_process = perf_context.write_pre_and_post_process_time();
let write_thread_wait = perf_context.write_thread_wait_nanos();
observe_perf_context_type!($ctx, perf_context, $metric, write_wal_time);
observe_perf_context_type!($ctx, perf_context, $metric, write_memtable_time);
observe_perf_context_type!($ctx, perf_context, $metric, db_mutex_lock_nanos);
observe_perf_context_type!($ctx, $metric, pre_and_post_process);
observe_perf_context_type!($ctx, $metric, write_thread_wait);
observe_perf_context_type!(
$ctx,
perf_context,
$metric,
write_scheduling_flushes_compactions_time
);
observe_perf_context_type!($ctx, perf_context, $metric, db_condition_wait_nanos);
observe_perf_context_type!($ctx, perf_context, $metric, write_delay_time);
}
};
}
#[macro_export]
macro_rules! observe_perf_context_type {
($s:expr, $metric: expr, $v:ident) => {
$metric.$v.observe((($v) - $s.$v) as f64 / 1_000_000_000.0);
$s.$v = $v;
};
($s:expr, $context: expr, $metric: expr, $v:ident) => {
let $v = $context.$v();
$metric.$v.observe((($v) - $s.$v) as f64 / 1_000_000_000.0);
$s.$v = $v;
};
}
lazy_static! {
/// Default perf flags for a write operation.
static ref DEFAULT_WRITE_PERF_FLAGS: PerfFlags = PerfFlag::WriteWalTime
| PerfFlag::WritePreAndPostProcessTime
| PerfFlag::WriteMemtableTime
| PerfFlag::WriteThreadWaitNanos
| PerfFlag::DbMutexLockNanos
| PerfFlag::WriteSchedulingFlushesCompactionsTime
| PerfFlag::DbConditionWaitNanos
| PerfFlag::WriteDelayTime;
}
pub struct PerfContextStatistics {
pub perf_level: PerfLevel,
pub kind: PerfContextKind,
pub write_wal_time: u64,
pub pre_and_post_process: u64,
pub write_memtable_time: u64,
pub write_thread_wait: u64,
pub db_mutex_lock_nanos: u64,
pub write_scheduling_flushes_compactions_time: u64,
pub db_condition_wait_nanos: u64,
pub write_delay_time: u64,
}
impl PerfContextStatistics {
/// Create an instance which stores instant statistics values, retrieved at creation.
pub fn new(perf_level: PerfLevel, kind: PerfContextKind) -> Self {
PerfContextStatistics {
perf_level,
kind,
write_wal_time: 0,
pre_and_post_process: 0,
write_thread_wait: 0,
write_memtable_time: 0,
db_mutex_lock_nanos: 0,
write_scheduling_flushes_compactions_time: 0,
db_condition_wait_nanos: 0,
write_delay_time: 0,
}
}
fn apply_write_perf_settings(&self) {
if self.perf_level == PerfLevel::Uninitialized {
set_perf_flags(&*DEFAULT_WRITE_PERF_FLAGS);
} else {
set_perf_level(raw_util::to_raw_perf_level(self.perf_level));
}
}
pub fn start(&mut self) {
if self.perf_level == PerfLevel::Disable {
return;
}
let mut ctx = RawPerfContext::get();
ctx.reset();
self.apply_write_perf_settings();
self.write_wal_time = 0;
self.pre_and_post_process = 0;
self.db_mutex_lock_nanos = 0;
self.write_thread_wait = 0;
self.write_memtable_time = 0;
self.write_scheduling_flushes_compactions_time = 0;
self.db_condition_wait_nanos = 0;
self.write_delay_time = 0;
}
pub fn report(&mut self) {
match self.kind {
PerfContextKind::RaftstoreApply => {
report_perf_context!(self, APPLY_PERF_CONTEXT_TIME_HISTOGRAM_STATIC);
}
PerfContextKind::RaftstoreStore => {
report_perf_context!(self, STORE_PERF_CONTEXT_TIME_HISTOGRAM_STATIC);
}
}
}
}
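// Usage sketch: a write path brackets the RocksDB write so that the deltas
// of the thread-local perf counters land in the apply-side histograms:
//
//     let mut stats = PerfContextStatistics::new(
//         PerfLevel::Uninitialized,
//         PerfContextKind::RaftstoreApply,
//     );
//     stats.start();
//     // ... perform the write batch ...
//     stats.report();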
| 35.748031 | 97 | 0.644714 |
16a76f6cfea6b33c9760719bbfbc289ec08ef1e6 | 2,517 | use crate::command_prelude::*;
use anyhow::bail;
use cargo::{drop_println, CargoResult};
use serde::Serialize;
pub fn cli() -> App {
subcommand("locate-project")
.about("Print a JSON representation of a Cargo.toml file's location")
.arg(opt("quiet", "No output printed to stdout").short("q"))
.arg_manifest_path()
.arg(
opt(
"message-format",
"Output representation [possible values: json, plain]",
)
.value_name("FMT"),
)
.arg(opt("workspace", "Locate Cargo.toml of the workspace root"))
.after_help("Run `cargo help locate-project` for more detailed information.\n")
}
#[derive(Serialize)]
pub struct ProjectLocation<'a> {
root: &'a str,
}
pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
let root_manifest;
let workspace;
let root = match WhatToFind::parse(args) {
WhatToFind::CurrentManifest => {
root_manifest = args.root_manifest(config)?;
&root_manifest
}
WhatToFind::Workspace => {
workspace = args.workspace(config)?;
workspace.root_manifest()
}
};
let root = root
.to_str()
.ok_or_else(|| {
anyhow::format_err!(
"your package path contains characters \
not representable in Unicode"
)
})
.map_err(|e| CliError::new(e, 1))?;
let location = ProjectLocation { root };
match MessageFormat::parse(args)? {
MessageFormat::Json => config.shell().print_json(&location)?,
MessageFormat::Plain => drop_println!(config, "{}", location.root),
}
Ok(())
}
enum WhatToFind {
CurrentManifest,
Workspace,
}
impl WhatToFind {
fn parse(args: &ArgMatches<'_>) -> Self {
if args.is_present("workspace") {
WhatToFind::Workspace
} else {
WhatToFind::CurrentManifest
}
}
}
enum MessageFormat {
Json,
Plain,
}
impl MessageFormat {
fn parse(args: &ArgMatches<'_>) -> CargoResult<Self> {
let fmt = match args.value_of("message-format") {
Some(fmt) => fmt,
None => return Ok(MessageFormat::Json),
};
match fmt.to_ascii_lowercase().as_str() {
"json" => Ok(MessageFormat::Json),
"plain" => Ok(MessageFormat::Plain),
s => bail!("invalid message format specifier: `{}`", s),
}
}
}
| 26.776596 | 87 | 0.559396 |
187c21136f3c5562e0971d046959bf20631cf782 | 16,511 | //
// Copyright 2020 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! gRPC client pseudo-Node functionality.
use crate::{
io::{Receiver, ReceiverExt},
metrics::Metrics,
node::{
grpc::{codec::VecCodec, invocation::Invocation},
ConfigurationError, Node,
},
NodePrivilege, RuntimeProxy,
};
use log::{debug, error, info, trace, warn};
use maplit::hashset;
use oak_abi::{proto::oak::application::GrpcClientConfiguration, Handle, OakStatus};
use oak_io::{handle::ReadHandle, OakError};
use oak_services::proto::{google::rpc, oak::encap::GrpcResponse};
use tokio::sync::oneshot;
use tonic::transport::{Certificate, Channel, ClientTlsConfig, Uri};
/// Struct that represents a gRPC client pseudo-Node.
pub struct GrpcClientNode {
/// Pseudo-Node name.
node_name: String,
/// The URI component of a gRPC server endpoint. Must contain the "Host" element.
/// https://docs.rs/tonic/0.2.1/tonic/transport/struct.Uri.html
uri: Uri,
/// Loaded PEM encoded X.509 TLS root certificate file used to authenticate an external gRPC
/// service.
root_tls_certificate: Certificate,
/// gRPC client to allow re-use of connection across multiple method invocations.
grpc_client: Option<tonic::client::Grpc<tonic::transport::channel::Channel>>,
node_privilege: NodePrivilege,
}
/// Checks if the URI contains the "Host" element.
fn check_uri(uri: &Uri) -> Result<(), ConfigurationError> {
uri.authority()
.filter(|authority| !authority.host().is_empty())
.map(|_| ())
.ok_or(ConfigurationError::NoHostElement)
}
fn grpc_client_node_privilege(uri: &Uri) -> NodePrivilege {
if uri.scheme() == Some(&http::uri::Scheme::HTTPS) {
// Authority is the host:port portion of the endpoint name.
if let Some(authority) = uri.authority() {
NodePrivilege::new(
hashset! { oak_abi::label::tls_endpoint_tag(&authority.as_str()) },
hashset! { oak_abi::label::tls_endpoint_tag(&authority.as_str()) },
)
} else {
NodePrivilege::default()
}
} else {
NodePrivilege::default()
}
}
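// Illustrative examples (derived from the branches above; URIs hypothetical):
// - "https://example.com:8080" yields a privilege built from the TLS endpoint
//   tag for "example.com:8080" (used for both sets passed to `NodePrivilege::new`).
// - "http://example.com" is not HTTPS, so it yields `NodePrivilege::default()`.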
impl GrpcClientNode {
/// Creates a new [`GrpcClientNode`] instance, but does not start it.
pub fn new(
node_name: &str,
config: GrpcClientConfiguration,
root_tls_certificate: Certificate,
) -> Result<Self, ConfigurationError> {
let uri = config.uri.parse().map_err(|error| {
error!("Error parsing URI {}: {:?}", config.uri, error);
ConfigurationError::IncorrectURI
})?;
check_uri(&uri)?;
// We compute the node privilege once and for all at start and just store it, since it does
// not change throughout the node execution.
let node_privilege = grpc_client_node_privilege(&uri);
// TODO(#814): Actually check that the newly created node can write to the
// "public_untrusted" label, taking into account its own label and privilege.
Ok(Self {
node_name: node_name.to_string(),
uri,
root_tls_certificate,
grpc_client: None,
node_privilege,
})
}
/// Main loop that handles gRPC invocations from the `handle`, sends gRPC requests to an
/// external gRPC service and writes gRPC responses back to the invocation channel.
async fn handle_loop(&mut self, runtime: RuntimeProxy, handle: Handle) -> Result<(), OakError> {
// Create a [`Receiver`] used for reading gRPC invocations.
let receiver = Receiver::<Invocation>::new(ReadHandle { handle });
loop {
debug!("Waiting for gRPC invocation");
// Read a gRPC invocation from the [`Receiver`].
let invocation = receiver.receive(&runtime).map_err(|error| {
match error {
OakError::OakStatus(OakStatus::ErrTerminated) => {
debug!("gRPC client node is terminating.")
}
OakError::OakStatus(OakStatus::ErrChannelClosed) => {
info!("gRPC invocation channel closed")
}
_ => error!("Couldn't receive the invocation: {:?}", error),
}
error
})?;
let result = self.process_invocation(&runtime, &invocation).await;
info!("Invocation processing finished: {:?}", result);
if result.is_err() {
warn!(
"Error encountered; forcing re-connection next time around ({:?})",
result
);
self.grpc_client = None;
}
invocation.close(&runtime);
}
}
/// Process a gRPC method invocation for an external gRPC service.
async fn process_invocation(
&mut self,
runtime: &RuntimeProxy,
invocation: &Invocation,
) -> Result<(), OakError> {
let uri = self.uri.to_string();
let record_completion_with_error = |method_name, error_code| {
runtime
.metrics_data()
.grpc_client_metrics
.grpc_client_completed
.with_label_values(&[&uri, method_name, &format!("{:?}", error_code)])
.inc();
// In case of an error, update the latency with zero to keep the counts consistent.
runtime
.metrics_data()
.grpc_client_metrics
.grpc_client_completed_latency_seconds
.with_label_values(&[&uri, method_name])
.observe(0_f64);
};
let send_error = |code, msg| {
invocation.send_error(code, msg, runtime);
// Update the number of started requests to keep the counts consistent.
runtime
.metrics_data()
.grpc_client_metrics
.observe_new_request(&uri, "unknown", 0);
record_completion_with_error("unknown", code);
};
// Receive a request from the invocation channel.
let request = invocation.receive_request(&runtime).map_err(|error| {
send_error(rpc::Code::Internal, "Failed to read request");
error!(
"Couldn't read gRPC request from the invocation: {:?}",
error
);
error
})?;
debug!("Incoming gRPC request: {:?}", request);
if self.grpc_client.is_none() {
// Connect to an external gRPC service.
self.grpc_client = Some(self.connect().await.map_err(|error| {
error!("Couldn't connect to {}: {:?}", self.uri, error);
send_error(rpc::Code::NotFound, "Service connection failed");
OakStatus::ErrInternal
})?);
}
let grpc_client = self.grpc_client.as_mut().unwrap();
grpc_client.ready().await.map_err(|error| {
error!("Service was not ready: {}", error);
send_error(rpc::Code::NotFound, "Service not ready");
OakStatus::ErrInternal
})?;
let codec = VecCodec::default();
let path = request.method_name.parse().map_err(|error| {
error!("Invalid URI {}: {}", request.method_name, error);
send_error(rpc::Code::InvalidArgument, "Invalid URI");
OakStatus::ErrInternal
})?;
let method_name = request.method_name;
runtime
.metrics_data()
.grpc_client_metrics
.observe_new_request(&uri, &method_name, request.req_msg.len());
// Forward the request to the external gRPC service and wait for the response(s).
let request = tonic::Request::new(request.req_msg);
let request_stream =
request.map(|m| futures_util::stream::once(futures_util::future::ready(m)));
let rsp_stream = match grpc_client.streaming(request_stream, path, codec).await {
Ok(rsp_stream) => rsp_stream,
Err(error) => {
error!("Request to remote service failed: {}", error);
let error_code = tonic_code_to_grpc(error.code());
invocation.send_error(error_code, error.message(), runtime);
record_completion_with_error(&method_name, error_code);
return Ok(());
}
};
let mut response_handler =
ResponseHandler::new(runtime.clone(), rsp_stream, invocation, uri, method_name);
response_handler.handle().await
}
/// Creates a TLS connection to an external gRPC service.
async fn connect(
&self,
) -> Result<tonic::client::Grpc<tonic::transport::channel::Channel>, tonic::transport::Error>
{
debug!("Connecting to {}", self.uri);
// Create a TLS configuration.
let tls_config = ClientTlsConfig::new().ca_certificate(self.root_tls_certificate.clone());
// Connect to a remote gRPC service.
let connection = Channel::builder(self.uri.clone())
.tls_config(tls_config)
.expect("Couldn't create TLS configuration")
.connect()
.await?;
debug!("Connected to {}", self.uri);
Ok(tonic::client::Grpc::new(connection))
}
}
struct MetricsRecorder {
metrics_data: Metrics,
server: String,
method_name: String,
msg_count: u32,
status_code: rpc::Code,
_timer: prometheus::HistogramTimer,
}
impl MetricsRecorder {
fn new(runtime: RuntimeProxy, server: String, method_name: String) -> MetricsRecorder {
let metrics_data = runtime.metrics_data();
let timer = metrics_data
.grpc_client_metrics
.start_timer(&server, &method_name);
MetricsRecorder {
metrics_data,
server,
method_name,
msg_count: 0,
status_code: rpc::Code::Ok,
_timer: timer,
}
}
fn update_status_code(&mut self, status_code: rpc::Code) {
self.status_code = status_code;
}
fn observe_message_with_len(&mut self, msg_len: usize) {
self.msg_count += 1;
self.metrics_data
.grpc_client_metrics
.observe_new_response_message(&self.server, &self.method_name, msg_len);
}
fn observe_completion(&self) {
self.metrics_data
.grpc_client_metrics
.observe_response_handling_completed(
&self.server,
&self.method_name,
&format!("{:?}", self.status_code),
self.msg_count,
);
}
}
impl Drop for MetricsRecorder {
fn drop(&mut self) {
trace!(
"Dropping MetricsRecorder for '{}:{}'.",
self.server,
self.method_name,
);
self.observe_completion();
// Note that dropping self._timer will record the duration.
}
}
struct ResponseHandler<'a> {
runtime: RuntimeProxy,
response_stream: tonic::Response<tonic::Streaming<Vec<u8>>>,
invocation: &'a Invocation,
// The lifetime of the metrics recorder matches the lifetime of the
// response handler, updating the metrics when the handler is dropped.
metrics_recorder: MetricsRecorder,
}
impl<'a> ResponseHandler<'a> {
fn new(
runtime: RuntimeProxy,
response_stream: tonic::Response<tonic::Streaming<Vec<u8>>>,
invocation: &'a Invocation,
server: String,
method_name: String,
) -> Self {
let metrics_recorder = MetricsRecorder::new(runtime.clone(), server, method_name);
ResponseHandler {
runtime,
response_stream,
invocation,
metrics_recorder,
}
}
async fn handle(&mut self) -> Result<(), OakError> {
let body_stream = self.response_stream.get_mut();
loop {
let metrics_recorder = &mut self.metrics_recorder;
let invocation = self.invocation;
let runtime = &self.runtime.clone();
let message = body_stream.message().await.map_err(|error| {
error!("Failed to read response: {}", error);
invocation.send_error(rpc::Code::Internal, "Failed to read response", runtime);
metrics_recorder.update_status_code(rpc::Code::Internal);
OakStatus::ErrInternal
})?;
if let Some(message) = message {
let msg_len = message.len();
let encap_rsp = GrpcResponse {
rsp_msg: message,
status: None,
last: false,
};
// Send the response back to the invocation channel.
debug!("Sending gRPC response: {:?}", encap_rsp);
invocation
.send_response(encap_rsp, runtime)
.map_err(|error| {
error!("Couldn't send gRPC response to the invocation: {:?}", error);
error
})?;
metrics_recorder.observe_message_with_len(msg_len);
} else {
debug!("No message available, close out method invocation");
break;
}
}
Ok(())
}
}
/// Oak Node implementation for the gRPC client pseudo-Node.
impl Node for GrpcClientNode {
fn run(
mut self: Box<Self>,
runtime: RuntimeProxy,
handle: Handle,
notify_receiver: oneshot::Receiver<()>,
) {
// Create an Async runtime for executing futures.
// https://docs.rs/tokio/
let mut async_runtime = tokio::runtime::Builder::new()
            // Use the basic scheduler, which runs all tasks on the current thread.
// https://docs.rs/tokio/0.2.16/tokio/runtime/index.html#basic-scheduler
.basic_scheduler()
// Enables the I/O driver.
// Necessary for using net, process, signal, and I/O types on the Tokio runtime.
.enable_io()
// Enables the time driver.
// Necessary for creating a Tokio Runtime.
.enable_time()
.build()
.expect("Couldn't create an Async runtime");
// Listen to incoming gRPC invocations.
info!(
"{}: Starting gRPC client pseudo-Node thread",
self.node_name
);
async_runtime.block_on(futures::future::select(
Box::pin(self.handle_loop(runtime, handle)),
notify_receiver,
));
info!("{}: Exiting gRPC client pseudo-Node thread", self.node_name);
}
fn get_privilege(&self) -> NodePrivilege {
self.node_privilege.clone()
}
}
fn tonic_code_to_grpc(code: tonic::Code) -> rpc::Code {
match code {
tonic::Code::Ok => rpc::Code::Ok,
tonic::Code::Cancelled => rpc::Code::Cancelled,
tonic::Code::Unknown => rpc::Code::Unknown,
tonic::Code::InvalidArgument => rpc::Code::InvalidArgument,
tonic::Code::DeadlineExceeded => rpc::Code::DeadlineExceeded,
tonic::Code::NotFound => rpc::Code::NotFound,
tonic::Code::AlreadyExists => rpc::Code::AlreadyExists,
tonic::Code::PermissionDenied => rpc::Code::PermissionDenied,
tonic::Code::ResourceExhausted => rpc::Code::ResourceExhausted,
tonic::Code::FailedPrecondition => rpc::Code::FailedPrecondition,
tonic::Code::Aborted => rpc::Code::Aborted,
tonic::Code::OutOfRange => rpc::Code::OutOfRange,
tonic::Code::Unimplemented => rpc::Code::Unimplemented,
tonic::Code::Internal => rpc::Code::Internal,
tonic::Code::Unavailable => rpc::Code::Unavailable,
tonic::Code::DataLoss => rpc::Code::DataLoss,
tonic::Code::Unauthenticated => rpc::Code::Unauthenticated,
_ => rpc::Code::Unknown,
}
}
| 37.956322 | 100 | 0.585428 |
71e9270fe4b0cba6df22453a51f7f3533e51477e | 5,436 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::option::*;
use core::kinds::marker;
use core::mem;
#[test]
fn test_get_ptr() {
unsafe {
let x = box 0i;
let addr_x: *const int = mem::transmute(&*x);
let opt = Some(x);
let y = opt.unwrap();
let addr_y: *const int = mem::transmute(&*y);
assert_eq!(addr_x, addr_y);
}
}
#[test]
fn test_get_str() {
let x = "test".to_string();
let addr_x = x.as_slice().as_ptr();
let opt = Some(x);
let y = opt.unwrap();
let addr_y = y.as_slice().as_ptr();
assert_eq!(addr_x, addr_y);
}
#[test]
fn test_get_resource() {
use std::rc::Rc;
use core::cell::RefCell;
struct R {
i: Rc<RefCell<int>>,
}
#[unsafe_destructor]
impl Drop for R {
fn drop(&mut self) {
let ii = &*self.i;
let i = *ii.borrow();
*ii.borrow_mut() = i + 1;
}
}
fn r(i: Rc<RefCell<int>>) -> R {
R {
i: i
}
}
let i = Rc::new(RefCell::new(0i));
{
let x = r(i.clone());
let opt = Some(x);
let _y = opt.unwrap();
}
assert_eq!(*i.borrow(), 1);
}
#[test]
fn test_option_dance() {
let x = Some(());
let mut y = Some(5i);
let mut y2 = 0;
for _x in x.iter() {
y2 = y.take().unwrap();
}
assert_eq!(y2, 5);
assert!(y.is_none());
}
#[test] #[should_fail]
fn test_option_too_much_dance() {
let mut y = Some(marker::NoCopy);
let _y2 = y.take().unwrap();
let _y3 = y.take().unwrap();
}
#[test]
fn test_and() {
let x: Option<int> = Some(1i);
assert_eq!(x.and(Some(2i)), Some(2));
assert_eq!(x.and(None::<int>), None);
let x: Option<int> = None;
assert_eq!(x.and(Some(2i)), None);
assert_eq!(x.and(None::<int>), None);
}
#[test]
fn test_and_then() {
let x: Option<int> = Some(1);
assert_eq!(x.and_then(|x| Some(x + 1)), Some(2));
assert_eq!(x.and_then(|_| None::<int>), None);
let x: Option<int> = None;
assert_eq!(x.and_then(|x| Some(x + 1)), None);
assert_eq!(x.and_then(|_| None::<int>), None);
}
#[test]
fn test_or() {
let x: Option<int> = Some(1);
assert_eq!(x.or(Some(2)), Some(1));
assert_eq!(x.or(None), Some(1));
let x: Option<int> = None;
assert_eq!(x.or(Some(2)), Some(2));
assert_eq!(x.or(None), None);
}
#[test]
fn test_or_else() {
let x: Option<int> = Some(1);
assert_eq!(x.or_else(|| Some(2)), Some(1));
assert_eq!(x.or_else(|| None), Some(1));
let x: Option<int> = None;
assert_eq!(x.or_else(|| Some(2)), Some(2));
assert_eq!(x.or_else(|| None), None);
}
#[test]
fn test_unwrap() {
assert_eq!(Some(1i).unwrap(), 1);
let s = Some("hello".to_string()).unwrap();
assert_eq!(s.as_slice(), "hello");
}
#[test]
#[should_fail]
fn test_unwrap_fail1() {
let x: Option<int> = None;
x.unwrap();
}
#[test]
#[should_fail]
fn test_unwrap_fail2() {
let x: Option<String> = None;
x.unwrap();
}
#[test]
fn test_unwrap_or() {
let x: Option<int> = Some(1);
assert_eq!(x.unwrap_or(2), 1);
let x: Option<int> = None;
assert_eq!(x.unwrap_or(2), 2);
}
#[test]
fn test_unwrap_or_else() {
let x: Option<int> = Some(1);
assert_eq!(x.unwrap_or_else(|| 2), 1);
let x: Option<int> = None;
assert_eq!(x.unwrap_or_else(|| 2), 2);
}
#[test]
fn test_iter() {
let val = 5i;
let x = Some(val);
let mut it = x.iter();
assert_eq!(it.size_hint(), (1, Some(1)));
assert_eq!(it.next(), Some(&val));
assert_eq!(it.size_hint(), (0, Some(0)));
assert!(it.next().is_none());
}
#[test]
fn test_mut_iter() {
let val = 5i;
let new_val = 11i;
let mut x = Some(val);
{
let mut it = x.iter_mut();
assert_eq!(it.size_hint(), (1, Some(1)));
match it.next() {
Some(interior) => {
assert_eq!(*interior, val);
*interior = new_val;
}
None => assert!(false),
}
assert_eq!(it.size_hint(), (0, Some(0)));
assert!(it.next().is_none());
}
assert_eq!(x, Some(new_val));
}
#[test]
fn test_ord() {
let small = Some(1.0f64);
let big = Some(5.0f64);
let nan = Some(0.0f64/0.0);
assert!(!(nan < big));
assert!(!(nan > big));
assert!(small < big);
assert!(None < big);
assert!(big > None);
}
#[test]
fn test_collect() {
let v: Option<Vec<int>> = range(0i, 0).map(|_| Some(0i)).collect();
assert!(v == Some(vec![]));
let v: Option<Vec<int>> = range(0i, 3).map(|x| Some(x)).collect();
assert!(v == Some(vec![0, 1, 2]));
let v: Option<Vec<int>> = range(0i, 3).map(|x| {
if x > 1 { None } else { Some(x) }
}).collect();
assert!(v == None);
// test that it does not take more elements than it needs
let mut functions = [|| Some(()), || None, || fail!()];
let v: Option<Vec<()>> = functions.iter_mut().map(|f| (*f)()).collect();
assert!(v == None);
}
| 22.46281 | 76 | 0.542494 |
7a0ac9d8b4f935fdd8c78368d18310ed75a51329 | 9,368 | #![allow(unused_imports, non_camel_case_types)]
use crate::models::r4::CodeableConcept::CodeableConcept;
use crate::models::r4::Element::Element;
use crate::models::r4::Extension::Extension;
use serde_json::json;
use serde_json::value::Value;
use std::borrow::Cow;
/// Set of definitional characteristics for a kind of observation or measurement
/// produced or consumed by an orderable health care service.
#[derive(Debug)]
pub struct ObservationDefinition_QuantitativeDetails<'a> {
pub(crate) value: Cow<'a, Value>,
}
impl ObservationDefinition_QuantitativeDetails<'_> {
pub fn new(value: &Value) -> ObservationDefinition_QuantitativeDetails {
ObservationDefinition_QuantitativeDetails {
value: Cow::Borrowed(value),
}
}
pub fn to_json(&self) -> Value {
(*self.value).clone()
}
/// Extensions for conversionFactor
pub fn _conversion_factor(&self) -> Option<Element> {
if let Some(val) = self.value.get("_conversionFactor") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for decimalPrecision
pub fn _decimal_precision(&self) -> Option<Element> {
if let Some(val) = self.value.get("_decimalPrecision") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Factor for converting value expressed with SI unit to value expressed with
/// customary unit.
pub fn conversion_factor(&self) -> Option<f64> {
if let Some(val) = self.value.get("conversionFactor") {
return Some(val.as_f64().unwrap());
}
return None;
}
/// Customary unit used to report quantitative results of observations conforming to
/// this ObservationDefinition.
pub fn customary_unit(&self) -> Option<CodeableConcept> {
if let Some(val) = self.value.get("customaryUnit") {
return Some(CodeableConcept {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Number of digits after decimal separator when the results of such observations are
/// of type Quantity.
pub fn decimal_precision(&self) -> Option<i64> {
if let Some(val) = self.value.get("decimalPrecision") {
return Some(val.as_i64().unwrap());
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element. To make the use of extensions safe and manageable,
/// there is a strict set of governance applied to the definition and use of
/// extensions. Though any implementer can define an extension, there is a set of
/// requirements that SHALL be met as part of the definition of the extension.
pub fn extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("extension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// Unique id for the element within a resource (for internal references). This may be
/// any string value that does not contain spaces.
pub fn id(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("id") {
return Some(string);
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element and that modifies the understanding of the element
/// in which it is contained and/or the understanding of the containing element's
/// descendants. Usually modifier elements provide negation or qualification. To make
/// the use of extensions safe and manageable, there is a strict set of governance
/// applied to the definition and use of extensions. Though any implementer can define
/// an extension, there is a set of requirements that SHALL be met as part of the
/// definition of the extension. Applications processing a resource are required to
/// check for modifier extensions. Modifier extensions SHALL NOT change the meaning
/// of any elements on Resource or DomainResource (including cannot change the meaning
/// of modifierExtension itself).
pub fn modifier_extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("modifierExtension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// SI unit used to report quantitative results of observations conforming to this
/// ObservationDefinition.
pub fn unit(&self) -> Option<CodeableConcept> {
if let Some(val) = self.value.get("unit") {
return Some(CodeableConcept {
value: Cow::Borrowed(val),
});
}
return None;
}
pub fn validate(&self) -> bool {
if let Some(_val) = self._conversion_factor() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._decimal_precision() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.conversion_factor() {}
if let Some(_val) = self.customary_unit() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.decimal_precision() {}
if let Some(_val) = self.extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.id() {}
if let Some(_val) = self.modifier_extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.unit() {
if !_val.validate() {
return false;
}
}
return true;
}
}
#[derive(Debug)]
pub struct ObservationDefinition_QuantitativeDetailsBuilder {
pub(crate) value: Value,
}
impl ObservationDefinition_QuantitativeDetailsBuilder {
pub fn build(&self) -> ObservationDefinition_QuantitativeDetails {
ObservationDefinition_QuantitativeDetails {
value: Cow::Owned(self.value.clone()),
}
}
pub fn with(
existing: ObservationDefinition_QuantitativeDetails,
) -> ObservationDefinition_QuantitativeDetailsBuilder {
ObservationDefinition_QuantitativeDetailsBuilder {
value: (*existing.value).clone(),
}
}
pub fn new() -> ObservationDefinition_QuantitativeDetailsBuilder {
let mut __value: Value = json!({});
return ObservationDefinition_QuantitativeDetailsBuilder { value: __value };
}
pub fn _conversion_factor<'a>(
&'a mut self,
val: Element,
) -> &'a mut ObservationDefinition_QuantitativeDetailsBuilder {
self.value["_conversionFactor"] = json!(val.value);
return self;
}
pub fn _decimal_precision<'a>(
&'a mut self,
val: Element,
) -> &'a mut ObservationDefinition_QuantitativeDetailsBuilder {
self.value["_decimalPrecision"] = json!(val.value);
return self;
}
pub fn conversion_factor<'a>(
&'a mut self,
val: f64,
) -> &'a mut ObservationDefinition_QuantitativeDetailsBuilder {
self.value["conversionFactor"] = json!(val);
return self;
}
pub fn customary_unit<'a>(
&'a mut self,
val: CodeableConcept,
) -> &'a mut ObservationDefinition_QuantitativeDetailsBuilder {
self.value["customaryUnit"] = json!(val.value);
return self;
}
pub fn decimal_precision<'a>(
&'a mut self,
val: i64,
) -> &'a mut ObservationDefinition_QuantitativeDetailsBuilder {
self.value["decimalPrecision"] = json!(val);
return self;
}
pub fn extension<'a>(
&'a mut self,
val: Vec<Extension>,
) -> &'a mut ObservationDefinition_QuantitativeDetailsBuilder {
self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn id<'a>(
&'a mut self,
val: &str,
) -> &'a mut ObservationDefinition_QuantitativeDetailsBuilder {
self.value["id"] = json!(val);
return self;
}
pub fn modifier_extension<'a>(
&'a mut self,
val: Vec<Extension>,
) -> &'a mut ObservationDefinition_QuantitativeDetailsBuilder {
self.value["modifierExtension"] =
json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn unit<'a>(
&'a mut self,
val: CodeableConcept,
) -> &'a mut ObservationDefinition_QuantitativeDetailsBuilder {
self.value["unit"] = json!(val.value);
return self;
}
}
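// A minimal sketch (not part of the original source): exercises the builder
// round-trip with hypothetical values, using only the accessors defined above.
#[cfg(test)]
mod quantitative_details_tests {
    use super::*;

    #[test]
    fn builder_round_trip() {
        let mut builder = ObservationDefinition_QuantitativeDetailsBuilder::new();
        let details = builder
            .decimal_precision(2)
            .conversion_factor(0.0555)
            .build();
        // Both optional fields were set, so they read back and validate.
        assert_eq!(details.decimal_precision(), Some(2));
        assert_eq!(details.conversion_factor(), Some(0.0555));
        assert!(details.validate());
    }
}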
| 33.942029 | 94 | 0.593296 |
9ba612a8d75514d494aa99230136456dfe78204b | 10,761 | // Copyright 2019-2020 Twitter, Inc.
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
use rustcommon_metrics::*;
use serde_derive::{Deserialize, Serialize};
use strum_macros::{EnumIter, EnumString, IntoStaticStr};
#[cfg(feature = "bpf")]
use crate::common::bpf::*;
#[derive(
Clone,
Copy,
Debug,
Deserialize,
EnumIter,
EnumString,
Eq,
IntoStaticStr,
PartialEq,
Hash,
Serialize,
)]
#[serde(deny_unknown_fields, try_from = "&str", into = "&str")]
pub enum TcpStatistic {
#[strum(serialize = "tcp/connect/latency")]
ConnectLatency,
#[strum(serialize = "tcp/receive/segment")]
ReceiveSegments,
#[strum(serialize = "tcp/transmit/segment")]
TransmitSegments,
#[strum(serialize = "tcp/receive/prune_called")]
ReceivePruneCalled,
#[strum(serialize = "tcp/receive/collapsed")]
ReceiveCollapsed,
#[strum(serialize = "tcp/transmit/retransmit")]
Retransmits,
#[strum(serialize = "tcp/receive/checksum_error")]
ReceiveChecksumErrors,
#[strum(serialize = "tcp/transmit/reset")]
TransmitResets,
#[strum(serialize = "tcp/receive/error")]
ReceiveErrors,
#[strum(serialize = "tcp/syncookies/sent")]
SyncookiesSent,
#[strum(serialize = "tcp/syncookies/received")]
    SyncookiesReceived,
#[strum(serialize = "tcp/syncookies/failed")]
SyncookiesFailed,
#[strum(serialize = "tcp/receive/pruned")]
ReceivePruned,
#[strum(serialize = "tcp/receive/ofo_pruned")]
ReceiveOfoPruned,
#[strum(serialize = "tcp/transmit/delayed_ack")]
TransmitDelayedAcks,
#[strum(serialize = "tcp/receive/listen_overflows")]
ReceiveListenOverflows,
#[strum(serialize = "tcp/receive/listen_drops")]
ReceiveListenDrops,
#[strum(serialize = "tcp/abort/failed")]
AbortFailed,
#[strum(serialize = "tcp/abort/on_close")]
AbortOnClose,
#[strum(serialize = "tcp/abort/on_data")]
AbortOnData,
#[strum(serialize = "tcp/abort/on_linger")]
AbortOnLinger,
#[strum(serialize = "tcp/abort/on_memory")]
AbortOnMemory,
#[strum(serialize = "tcp/abort/on_timeout")]
AbortOnTimeout,
#[strum(serialize = "tcp/srtt")]
SmoothedRoundTripTime,
#[strum(serialize = "tcp/jitter")]
Jitter,
#[strum(serialize = "tcp/connection/accepted")]
ConnectionAccepted,
#[strum(serialize = "tcp/connection/initiated")]
ConnectionInitiated,
#[strum(serialize = "tcp/drop")]
Drop,
#[strum(serialize = "tcp/tlp")]
TailLossProbe,
#[strum(serialize = "tcp/transmit/retransmit_timeout")]
RetransmitTimeout,
#[strum(serialize = "tcp/receive/duplicate")]
Duplicate,
#[strum(serialize = "tcp/receive/out_of_order")]
OutOfOrder,
}
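// Illustrative round-trip (follows from the strum/serde attributes above):
// `"tcp/connect/latency".parse::<TcpStatistic>()` yields
// `Ok(TcpStatistic::ConnectLatency)`, and serializing that variant with serde
// produces the same `"tcp/connect/latency"` string again.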
impl TcpStatistic {
pub fn keys(self) -> Option<(&'static str, &'static str)> {
match self {
Self::AbortFailed => Some(("TcpExt:", "TCPAbortFailed")),
Self::AbortOnClose => Some(("TcpExt:", "TCPAbortOnClose")),
Self::AbortOnData => Some(("TcpExt:", "TCPAbortOnData")),
Self::AbortOnLinger => Some(("TcpExt:", "TCPAbortOnLinger")),
Self::AbortOnMemory => Some(("TcpExt:", "TCPAbortOnMemory")),
Self::AbortOnTimeout => Some(("TcpExt:", "TCPAbortOnTimeout")),
Self::ReceiveSegments => Some(("Tcp:", "InSegs")),
Self::TransmitSegments => Some(("Tcp:", "OutSegs")),
Self::ReceivePruneCalled => Some(("TcpExt:", "PruneCalled")),
Self::ReceiveCollapsed => Some(("TcpExt:", "TCPRcvCollapsed")),
Self::Retransmits => Some(("Tcp:", "RetransSegs")),
Self::ReceiveChecksumErrors => Some(("Tcp:", "InCsumErrors")),
Self::TransmitResets => Some(("Tcp:", "OutRsts")),
Self::ReceiveErrors => Some(("Tcp:", "InErrs")),
Self::SyncookiesSent => Some(("TcpExt:", "SyncookiesSent")),
            Self::SyncookiesReceived => Some(("TcpExt:", "SyncookiesRecv")),
Self::SyncookiesFailed => Some(("TcpExt:", "SyncookiesFailed")),
Self::ReceivePruned => Some(("TcpExt:", "RcvPruned")),
Self::ReceiveOfoPruned => Some(("TcpExt:", "OfoPruned")),
Self::TransmitDelayedAcks => Some(("TcpExt:", "DelayedACKs")),
Self::ReceiveListenOverflows => Some(("TcpExt:", "ListenOverflows")),
Self::ReceiveListenDrops => Some(("TcpExt:", "ListenDrops")),
_ => None,
}
}
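    // Illustrative reading (an assumption; the parsing code is not shown here):
    // a pair like `("Tcp:", "InSegs")` is presumably matched against
    // `/proc/net/snmp`-style output, selecting the `InSegs` column of the rows
    // prefixed with `Tcp:`.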
pub fn bpf_table(self) -> Option<&'static str> {
match self {
Self::ConnectLatency => Some("connlat"),
Self::SmoothedRoundTripTime => Some("srtt"),
Self::Jitter => Some("jitter"),
Self::ConnectionAccepted => Some("conn_accepted"),
Self::ConnectionInitiated => Some("conn_initiated"),
Self::Drop => Some("drop"),
Self::TailLossProbe => Some("tlp"),
Self::RetransmitTimeout => Some("rto"),
Self::Duplicate => Some("duplicate"),
Self::OutOfOrder => Some("ooo"),
_ => None,
}
}
#[cfg(feature = "bpf")]
pub fn bpf_probes_required(self) -> Vec<Probe> {
// define the unique probes below.
let tcp_connect_v4_probe = Probe {
name: "tcp_v4_connect".to_string(),
handler: "trace_connect".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Entry,
binary_path: None,
sub_system: None,
};
let tcp_connect_v6_probe = Probe {
name: "tcp_v6_connect".to_string(),
handler: "trace_connect".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Entry,
binary_path: None,
sub_system: None,
};
let tcp_connect_v4_ret_probe = Probe {
name: "tcp_v4_connect".to_string(),
handler: "trace_connect_return".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Return,
binary_path: None,
sub_system: None,
};
let tcp_connect_v6_ret_probe = Probe {
name: "tcp_v6_connect".to_string(),
handler: "trace_connect_return".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Return,
binary_path: None,
sub_system: None,
};
let tcp_rcv_state_process_probe = Probe {
name: "tcp_rcv_state_process".to_string(),
handler: "trace_tcp_rcv_state_process".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Entry,
binary_path: None,
sub_system: None,
};
let tcp_rcv_established_probe = Probe {
name: "tcp_rcv_established".to_string(),
handler: "trace_tcp_rcv".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Entry,
binary_path: None,
sub_system: None,
};
let inet_csk_accept_ret_probe = Probe {
name: "inet_csk_accept".to_string(),
handler: "trace_inet_socket_accept_return".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Return,
binary_path: None,
sub_system: None,
};
let tcp_set_state_probe = Probe {
name: "tcp_set_state".to_string(),
handler: "trace_tcp_set_state".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Entry,
binary_path: None,
sub_system: None,
};
let tcp_finish_connect_ret_probe = Probe {
name: "tcp_finish_connect".to_string(),
handler: "trace_finish_connect".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Return,
binary_path: None,
sub_system: None,
};
let tcp_drop_probe = Probe {
name: "tcp_drop".to_string(),
handler: "trace_tcp_drop".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Entry,
binary_path: None,
sub_system: None,
};
let tcp_tlp_probe = Probe {
name: "tcp_send_loss_probe".to_string(),
handler: "trace_tlp".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Entry,
binary_path: None,
sub_system: None,
};
let tcp_rto_probe = Probe {
name: "tcp_retransmit_timer".to_string(),
handler: "trace_rto".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Entry,
binary_path: None,
sub_system: None,
};
let tcp_validate_incoming_probe = Probe {
name: "tcp_validate_incoming".to_string(),
handler: "trace_validate_incoming".to_string(),
probe_type: ProbeType::Kernel,
probe_location: ProbeLocation::Entry,
binary_path: None,
sub_system: None,
};
// specify what probes are required for each telemetry.
match self {
Self::ConnectLatency => vec![
tcp_connect_v4_probe,
tcp_connect_v6_probe,
tcp_rcv_state_process_probe,
],
Self::SmoothedRoundTripTime | Self::Jitter => vec![tcp_rcv_established_probe],
Self::ConnectionAccepted => vec![inet_csk_accept_ret_probe, tcp_set_state_probe],
Self::ConnectionInitiated => vec![
tcp_connect_v4_probe,
tcp_connect_v6_probe,
tcp_connect_v4_ret_probe,
tcp_connect_v6_ret_probe,
tcp_finish_connect_ret_probe,
tcp_set_state_probe,
],
Self::Drop => vec![tcp_drop_probe],
Self::TailLossProbe => vec![tcp_tlp_probe],
Self::RetransmitTimeout => vec![tcp_rto_probe],
Self::Duplicate => vec![tcp_validate_incoming_probe],
Self::OutOfOrder => vec![tcp_validate_incoming_probe],
_ => Vec::new(),
}
}
}
impl Statistic<AtomicU64, AtomicU32> for TcpStatistic {
fn name(&self) -> &str {
(*self).into()
}
fn source(&self) -> Source {
match self.bpf_table() {
Some("connlat") | Some("srtt") | Some("jitter") => Source::Distribution,
_ => Source::Counter,
}
}
}
| 37.757895 | 93 | 0.587957 |
ac2d2365611e24c6f7cf35ffd7be3b0e3b36b344 | 9,918 | use crate::gc::Gc;
use crate::rerrs::{ErrorKind, SteelErr};
use crate::rvals::{Result, SteelVal};
use crate::stop;
use im_rc::HashSet;
use crate::primitives::ListOperations;
use crate::primitives::VectorOperations;
pub struct HashSetOperations {}
impl HashSetOperations {
pub fn hs_construct() -> SteelVal {
SteelVal::FuncV(|args: &[SteelVal]| -> Result<SteelVal> {
let mut hs = HashSet::new();
for key in args {
if key.is_hashable() {
hs.insert(key.clone());
} else {
stop!(TypeMismatch => "hash key not hashable!");
}
}
Ok(SteelVal::HashSetV(Gc::new(hs)))
})
}
pub fn hs_insert() -> SteelVal {
SteelVal::FuncV(|args: &[SteelVal]| -> Result<SteelVal> {
if args.len() != 2 {
stop!(ArityMismatch => "set insert takes 2 arguments")
}
let hashset = &args[0];
let key = &args[1];
if let SteelVal::HashSetV(hs) = hashset {
let mut hs = hs.unwrap();
if key.is_hashable() {
hs.insert(key.clone());
} else {
stop!(TypeMismatch => "hash key not hashable!");
}
Ok(SteelVal::HashSetV(Gc::new(hs)))
} else {
stop!(TypeMismatch => "set insert takes a set")
}
})
}
pub fn hs_contains() -> SteelVal {
SteelVal::FuncV(|args: &[SteelVal]| -> Result<SteelVal> {
if args.len() != 2 {
stop!(ArityMismatch => "set-contains? get takes 2 arguments")
}
let hashset = &args[0];
let key = &args[1];
if let SteelVal::HashSetV(hm) = hashset {
if key.is_hashable() {
                    Ok(SteelVal::BoolV(hm.contains(key)))
} else {
stop!(TypeMismatch => "hash key not hashable!");
}
} else {
stop!(TypeMismatch => "set-contains? takes a hashmap")
}
})
}
// keys as list
pub fn keys_to_list() -> SteelVal {
SteelVal::FuncV(|args: &[SteelVal]| -> Result<SteelVal> {
if args.len() != 1 {
stop!(ArityMismatch => "hm-keys->list takes 1 argument")
}
let hashset = &args[0];
if let SteelVal::HashSetV(hs) = hashset {
let keys = hs.iter().cloned().collect::<Vec<SteelVal>>();
ListOperations::built_in_list_func_flat(&keys)
} else {
stop!(TypeMismatch => "hm-keys->list takes a hashmap")
}
})
}
// keys as vectors
pub fn keys_to_vector() -> SteelVal {
SteelVal::FuncV(|args: &[SteelVal]| -> Result<SteelVal> {
if args.len() != 1 {
stop!(ArityMismatch => "hm-keys->vector takes 1 argument")
}
let hashset = &args[0];
if let SteelVal::HashSetV(hs) = hashset {
VectorOperations::vec_construct_iter_normal(hs.iter().cloned())
} else {
stop!(TypeMismatch => "hm-keys->vector takes a hashmap")
}
})
}
pub fn clear() -> SteelVal {
SteelVal::FuncV(|args: &[SteelVal]| -> Result<SteelVal> {
if args.len() != 1 {
stop!(ArityMismatch => "hs-clear takes 1 argument")
}
let hashset = &args[0];
if let SteelVal::HashSetV(hs) = hashset {
let mut hs = hs.unwrap();
hs.clear();
Ok(SteelVal::HashSetV(Gc::new(hs)))
} else {
stop!(TypeMismatch => "hs-clear takes a hashmap")
}
})
}
pub fn list_to_hashset() -> SteelVal {
SteelVal::FuncV(|args: &[SteelVal]| -> Result<SteelVal> {
if args.len() != 1 {
stop!(ArityMismatch => "list->hashset takes one argument")
}
if let SteelVal::Pair(_) = &args[0] {
let root = &args[0];
let hashset: HashSet<SteelVal> = SteelVal::iter(root.clone()).collect();
Ok(SteelVal::HashSetV(Gc::new(hashset)))
} else {
stop!(TypeMismatch => "list->hashset takes a hashset");
}
})
}
}
#[cfg(test)]
mod hashset_tests {
use super::*;
use crate::throw;
// use im_rc::hashset;
fn apply_function(func: SteelVal, args: Vec<SteelVal>) -> Result<SteelVal> {
func.func_or_else(throw!(BadSyntax => "hash tests"))
.unwrap()(&args)
}
#[test]
fn hs_construct_normal() {
let args = vec![
SteelVal::StringV("foo".into()),
SteelVal::StringV("bar".into()),
SteelVal::StringV("foo2".into()),
SteelVal::StringV("bar2".into()),
];
let res = apply_function(HashSetOperations::hs_construct(), args);
let expected = SteelVal::HashSetV(Gc::new(
vec![
SteelVal::StringV("foo".into()),
SteelVal::StringV("bar".into()),
SteelVal::StringV("foo2".into()),
SteelVal::StringV("bar2".into()),
]
.into_iter()
.map(Gc::new)
.collect(),
));
assert_eq!(res.unwrap(), expected);
}
#[test]
fn hs_construct_with_duplicates() {
let args = vec![
SteelVal::StringV("foo".into()),
SteelVal::StringV("bar".into()),
SteelVal::StringV("foo2".into()),
SteelVal::StringV("bar2".into()),
SteelVal::StringV("foo".into()),
SteelVal::StringV("bar".into()),
SteelVal::StringV("foo2".into()),
SteelVal::StringV("bar2".into()),
];
let res = apply_function(HashSetOperations::hs_construct(), args);
let expected = SteelVal::HashSetV(Gc::new(
vec![
SteelVal::StringV("foo".into()),
SteelVal::StringV("bar".into()),
SteelVal::StringV("foo2".into()),
SteelVal::StringV("bar2".into()),
]
.into_iter()
.map(Gc::new)
.collect(),
));
assert_eq!(res.unwrap(), expected);
}
#[test]
fn hs_insert_from_empty() {
let args = vec![
SteelVal::HashSetV(Gc::new(vec![].into())),
SteelVal::StringV("foo".into()),
];
let res = apply_function(HashSetOperations::hs_insert(), args);
let expected = SteelVal::HashSetV(Gc::new(
vec![SteelVal::StringV("foo".into())]
.into_iter()
.map(Gc::new)
.collect(),
));
assert_eq!(res.unwrap(), expected);
}
#[test]
fn hs_contains_true() {
let args = vec![
SteelVal::HashSetV(Gc::new(
vec![SteelVal::StringV("foo".into())]
.into_iter()
.map(Gc::new)
.collect(),
)),
SteelVal::StringV("foo".into()),
];
let res = apply_function(HashSetOperations::hs_contains(), args);
let expected = SteelVal::BoolV(true);
assert_eq!(res.unwrap(), expected);
}
#[test]
fn hs_contains_false() {
let args = vec![
SteelVal::HashSetV(Gc::new(
vec![SteelVal::StringV("foo".into())]
.into_iter()
.map(Gc::new)
.collect(),
)),
SteelVal::StringV("bar".into()),
];
let res = apply_function(HashSetOperations::hs_contains(), args);
let expected = SteelVal::BoolV(false);
assert_eq!(res.unwrap(), expected);
}
#[test]
fn hs_keys_to_vector_normal() {
let args = vec![SteelVal::HashSetV(Gc::new(
vec![
SteelVal::StringV("foo".into()),
SteelVal::StringV("bar".into()),
SteelVal::StringV("baz".into()),
]
.into_iter()
.collect(),
))];
let res = apply_function(HashSetOperations::keys_to_vector(), args);
let expected = SteelVal::VectorV(Gc::new(
vec![
SteelVal::StringV("foo".into()),
SteelVal::StringV("bar".into()),
SteelVal::StringV("baz".into()),
]
.into_iter()
.collect(),
));
// pull out the vectors and sort them
// let unwrapped_expected: SteelVal = (*expected).clone();
let mut res_vec_string: Vec<Gc<String>> = if let SteelVal::VectorV(v) = res.unwrap() {
v.iter()
.map(|x| {
if let SteelVal::StringV(ref s) = x {
s.clone()
} else {
panic!("test failed")
}
})
.collect()
} else {
panic!("test failed")
};
let mut expected_vec_string: Vec<Gc<String>> = if let SteelVal::VectorV(v) = expected {
v.iter()
.map(|x| {
if let SteelVal::StringV(ref s) = x {
s.clone()
} else {
panic!("test failed")
}
})
.collect()
} else {
panic!("test failed")
};
res_vec_string.sort();
expected_vec_string.sort();
assert_eq!(res_vec_string, expected_vec_string);
}
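    // A minimal sketch (not in the original tests): `hs-clear` on a non-empty
    // set should return an empty set.
    #[test]
    fn hs_clear_empties_the_set() {
        let args = vec![SteelVal::HashSetV(Gc::new(
            vec![SteelVal::StringV("foo".into())].into_iter().collect(),
        ))];
        let res = apply_function(HashSetOperations::clear(), args);
        let expected = SteelVal::HashSetV(Gc::new(HashSet::new()));
        assert_eq!(res.unwrap(), expected);
    }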
}
| 31.585987 | 95 | 0.461182 |
625e9cbf182dc0eb979c1b3f1c47f3fbf5a6c317 | 2,312 | use std::str::FromStr;
use aoc_runner_derive::aoc;
use recap::Recap;
use serde::Deserialize;
#[derive(Debug, Deserialize, Hash, PartialEq, Recap)]
#[recap(regex = r#"(?x)
(?P<number_1>\d+)
-
(?P<number_2>\d+)
\s+
(?P<character>.)
:\s+
(?P<password>\S+)
"#)]
pub struct PasswordValidator {
number_1: usize,
number_2: usize,
character: char,
password: String,
}
impl PasswordValidator {
fn valid_by_count(&self) -> bool {
let count = self.password.matches(self.character).count();
count >= self.number_1 && count <= self.number_2
}
// This is 1-indexed
fn has_char_at_index(&self, idx: usize) -> bool {
char::from(self.password.as_bytes()[idx - 1]) == self.character
}
fn valid_by_index(&self) -> bool {
self.has_char_at_index(self.number_1) ^ self.has_char_at_index(self.number_2)
}
}
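// Worked example (using the first sample line from the tests below): for
// "1-3 a: abcde", `valid_by_count` holds because 'a' occurs once and
// 1 <= 1 <= 3, while `valid_by_index` holds because position 1 is 'a' but
// position 3 ('c') is not, so the XOR is true.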
pub fn parse(input: &str) -> impl Iterator<Item = PasswordValidator> + '_ {
input.lines().map(FromStr::from_str).map(|x| x.unwrap())
}
#[aoc(day2, part1)]
pub fn part_1(input: &str) -> usize {
parse(input).filter(|x| x.valid_by_count()).count()
}
#[aoc(day2, part2)]
pub fn part_2(input: &str) -> usize {
parse(input).filter(|x| x.valid_by_index()).count()
}
#[cfg(test)]
mod tests {
use super::*;
static SAMPLE: &str = "1-3 a: abcde\n1-3 b: cdefg\n2-9 c: ccccccccc";
#[test]
fn test_parse() {
assert_eq!(
parse(SAMPLE).collect::<Vec<_>>(),
vec!(
PasswordValidator {
number_1: 1,
number_2: 3,
character: 'a',
password: "abcde".into()
},
PasswordValidator {
number_1: 1,
number_2: 3,
character: 'b',
password: "cdefg".into()
},
PasswordValidator {
number_1: 2,
number_2: 9,
character: 'c',
password: "ccccccccc".into()
}
)
);
}
#[test]
fn test_part_1() {
assert_eq!(part_1(SAMPLE), 2);
}
#[test]
fn test_part_2() {
assert_eq!(part_2(SAMPLE), 1);
}
}
| 23.835052 | 85 | 0.508218 |
690cfd5627d5c9bf98cfe67c6834ef9da653a0c0 | 276 | use crate::model::link::WrappedUri;
/// Pagination links
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct Pagination {
pub first: Option<WrappedUri>,
pub prev: Option<WrappedUri>,
pub next: Option<WrappedUri>,
pub last: Option<WrappedUri>,
}
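// Illustrative JSON shape (hypothetical URIs, and assuming `WrappedUri`
// serializes to a URI string):
// {"first":"/items?page=1","prev":null,"next":"/items?page=3","last":"/items?page=9"}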
| 27.6 | 55 | 0.710145 |
28dcd0f42c0625e65b01b6cec2fe78986797f363 | 880 | // option2.rs
// Make me compile! Execute `rustlings hint option2` for hints
fn main() {
let optional_value = Some(String::from("rustlings"));
// TODO: Make this an if let statement whose value is "Some" type
if let Some(value) = optional_value {
println!("the value of optional value is: {}", value);
} else {
println!("The optional value doesn't contain anything!");
}
let mut optional_values_vec: Vec<Option<i8>> = Vec::new();
for x in 1..10 {
optional_values_vec.push(Some(x));
}
// TODO: make this a while let statement - remember that vector.pop also adds another layer of Option<T>
// You can stack `Option<T>`'s into while let and if let
while let Some(optional) = optional_values_vec.pop() {
if let Some(value) = optional {
println!("current value: {}", value);
}
}
}
| 33.846154 | 108 | 0.625 |
8fc3468adbbe1c6bb106b84fc7f0151b4c25e589 | 57 | org.jfree.chart.renderer.category.StackedBarRendererTest
| 28.5 | 56 | 0.894737 |
9b71559100567c1642502279b62ef14c8135364a | 1,042 | // Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::cli_state::CliState;
use crate::StarcoinOpt;
use anyhow::Result;
use scmd::{CommandAction, ExecContext};
use starcoin_account_api::AccountInfo;
use structopt::StructOpt;
/// Create a new account
#[derive(Debug, StructOpt, Default)]
#[structopt(name = "create")]
pub struct CreateOpt {
#[structopt(short = "p")]
password: String,
}
pub struct CreateCommand;
impl CommandAction for CreateCommand {
type State = CliState;
type GlobalOpt = StarcoinOpt;
type Opt = CreateOpt;
type ReturnItem = AccountInfo;
fn run(
&self,
ctx: &ExecContext<Self::State, Self::GlobalOpt, Self::Opt>,
) -> Result<AccountInfo> {
let account_client = ctx.state().account_client();
let account = account_client.create_account(ctx.opt().password.clone())?;
Ok(account)
}
fn skip_history(&self, _ctx: &ExecContext<Self::State, Self::GlobalOpt, Self::Opt>) -> bool {
true
}
}
| 26.05 | 97 | 0.673704 |
14f4db277edb1c57c95a8e8b7e2587d793872740 | 7,853 | //! A library that provides a more flexible way to construct
//! and extend the recursive function.
//!
//! The `RecurFn` trait is an abstraction of a recursive function.
//! By accepting a function parameter `recur` as the recursion
//! rather than recurring directly, it makes constructing an
//! anonymous recursive function possible.
//!
//! ```
//! use recur_fn::{recur_fn, RecurFn};
//!
//! let fib = recur_fn(|fib, n: i32| {
//! if n <= 1 {
//! n
//! } else {
//! fib(n - 1) + fib(n - 2)
//! }
//! });
//!
//! assert_eq!(55, fib.call(10));
//! ```
//!
//! Besides, it makes extending the body of a recursive function possible.
//!
//! ```
//! use recur_fn::{recur_fn, RecurFn};
//! use std::cell::RefCell;
//!
//! let fib = recur_fn(|fib, n: i32| {
//! if n <= 1 {
//! n
//! } else {
//! fib(n - 1) + fib(n - 2)
//! }
//! });
//!
//! let log = RefCell::new(Vec::new());
//!
//! let fib_with_logging = recur_fn(|recur, n: i32| {
//! log.borrow_mut().push(n);
//! fib.body(recur, n)
//! });
//!
//! fib_with_logging.call(3);
//! assert_eq!(*log.borrow(), vec![3, 2, 1, 0, 1]);
//! ```
//!
//! As `recur_fn` is a convenient way to construct a `RecurFn`,
//! calling it is slower than direct recursion.
//! To make it zero-cost, consider defining a struct,
//! implementing the `RecurFn` trait for it, and marking the `body` method with `#[inline]`.
//!
//! ```
//! use recur_fn::RecurFn;
//!
//! let fib = {
//! struct Fib {}
//! impl RecurFn<i32, i32> for Fib {
//! #[inline]
//! fn body(&self, fib: impl Fn(i32) -> i32, n: i32) -> i32 {
//! if n <= 1 {
//! n
//! } else {
//! fib(n - 1) + fib(n - 2)
//! }
//! }
//! }
//! Fib {}
//! };
//!
//! assert_eq!(55, fib.call(10));
//! ```
//!
//! or if the function doesn't capture anything,
//! you can use `recur_fn` macro.
//!
//! ```
//! use recur_fn::{recur_fn, RecurFn};
//!
//! let fact = recur_fn!(fact(n: i32) -> i32 {
//! if n == 0 { 1 } else { n * fact(n - 1) }
//! });
//! assert_eq!(6, fact.call(3));
//! assert_eq!(0,
//! fact.body(|_| 0, 3));
//! ```
//!
//! `DynRecurFn` is a dynamic version of `RecurFn`
//! that allows you to have a trait object.
//!
//! ```
//! use recur_fn::{recur_fn, RecurFn, DynRecurFn};
//! use core::ops::Deref;
//!
//! let dyn_fact: &dyn DynRecurFn<_, _> =
//! &recur_fn(|fact, n: i32| if n == 0 { 1 } else { n * fact(n - 1) });
//! ```
#![no_std]
use core::ops::Deref;
/// The recursive function trait.
///
/// Instead of recurring directly,
/// this trait allows user to customize the recursion.
/// In this way, we can extract and extend the body of a recursive function.
///
/// This trait supports only one argument.
/// If you need multiple arguments, use a tuple.
pub trait RecurFn<Arg, Output> {
/// The body of the recursive function.
///
/// Marking this method by `#[inline]` will make the `call` method
/// as fast as recurring directly.
fn body(&self, recur: impl Fn(Arg) -> Output, arg: Arg) -> Output;
/// Calls the recursive function.
#[inline]
fn call(&self, arg: Arg) -> Output {
self.body(|arg| self.call(arg), arg)
}
}
/// The dynamic version of `RecurFn` that supports trait object.
pub trait DynRecurFn<Arg, Output> {
/// The body of the recursive function.
fn dyn_body(&self, recur: &dyn Fn(Arg) -> Output, arg: Arg) -> Output;
}
impl<Arg, Output, R> DynRecurFn<Arg, Output> for R
where
R: RecurFn<Arg, Output>,
{
fn dyn_body(&self, recur: &dyn Fn(Arg) -> Output, arg: Arg) -> Output {
self.body(recur, arg)
}
}
macro_rules! impl_dyn_with_markers {
($($marker:ident),*) => {
impl<'a, Arg, Output> RecurFn<Arg, Output> for dyn DynRecurFn<Arg, Output> + 'a$( + $marker)*
{
fn body(&self, recur: impl Fn(Arg) -> Output, arg: Arg) -> Output {
self.dyn_body(&recur, arg)
}
}
};
}
impl_dyn_with_markers! {}
impl_dyn_with_markers! {Send}
impl_dyn_with_markers! {Sync}
impl_dyn_with_markers! {Send, Sync}
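// For example, `impl_dyn_with_markers! {Send}` expands to (illustratively):
// `impl<'a, Arg, Output> RecurFn<Arg, Output> for dyn DynRecurFn<Arg, Output> + 'a + Send`,
// so trait objects with any combination of `Send`/`Sync` markers remain usable
// as `RecurFn`s.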
/// A `RecurFn` that delegates to a pointer to a `RecurFn`.
pub struct FromPointer<D>(D);
impl<Arg, Output, D> RecurFn<Arg, Output> for FromPointer<D>
where
D: Deref,
D::Target: RecurFn<Arg, Output>,
{
#[inline]
fn body(&self, recur: impl Fn(Arg) -> Output, arg: Arg) -> Output {
self.0.body(recur, arg)
}
}
/// Returns a `RecurFn` implementation from a pointer
/// to `RecurFn`, i.e. an implementation of `Deref` whose `Target`
/// implements `RecurFn`.
///
/// # Examples
///
/// ```
/// use recur_fn::{RecurFn, recur_fn, from_pointer};
///
/// fn test_fact(fact: impl RecurFn<i32, i32>) {
/// assert_eq!(fact.call(5), 120);
/// }
/// let box_fact = Box::new(recur_fn(
/// |fact, n: i32| {
/// if n <= 1 {
/// 1
/// } else {
/// n * fact(n - 1)
/// }
/// },
/// ));
/// test_fact(from_pointer(box_fact));
/// ```
pub fn from_pointer<Arg, Output, D>(d: D) -> FromPointer<D>
where
D: Deref,
D::Target: RecurFn<Arg, Output>,
{
FromPointer(d)
}
/// A `RecurFn` that doesn't call the `recur` parameter in its body.
pub struct Direct<F>(F);
impl<Arg, Output, F> RecurFn<Arg, Output> for Direct<F>
where
F: Fn(Arg) -> Output,
{
#[inline]
fn body(&self, _: impl Fn(Arg) -> Output, arg: Arg) -> Output {
(self.0)(arg)
}
fn call(&self, arg: Arg) -> Output {
(self.0)(arg)
}
}
/// Constructs a non-recursive `RecurFn` calling `f` directly.
///
/// # Examples
///
/// ```
/// use recur_fn::{RecurFn, direct};
///
/// let double = direct(|n: i32| n * 2);
/// assert_eq!(4, double.call(2));
/// assert_eq!(20, double.body(|_| 0, 10));
/// ```
pub fn direct<Arg, Output, F: Fn(Arg) -> Output>(f: F) -> Direct<F> {
Direct(f)
}
/// A `RecurFn` that uses a closure as the body.
pub struct Closure<F>(F);
impl<Arg, Output, F> RecurFn<Arg, Output> for Closure<F>
where
F: Fn(&dyn Fn(Arg) -> Output, Arg) -> Output,
{
fn body(&self, recur: impl Fn(Arg) -> Output, arg: Arg) -> Output {
self.0(&recur, arg)
}
fn call(&self, arg: Arg) -> Output {
self.0(&|arg| self.call(arg), arg)
}
}
/// Constructs a `RecurFn` with the body specified.
///
/// # Examples
///
/// ```
/// use recur_fn::{recur_fn, RecurFn};
///
/// let fib = recur_fn(|fib, n: i32| {
/// if n <= 1 {
/// n
/// } else {
/// fib(n - 1) + fib(n - 2)
/// }
/// });
///
/// assert_eq!(55, fib.call(10));
/// ```
pub fn recur_fn<Arg, Output, F>(body: F) -> Closure<F>
where
F: Fn(&dyn Fn(Arg) -> Output, Arg) -> Output,
{
Closure(body)
}
/// Expands a function definition to defining a struct,
/// implementing `RecurFn` for the struct and constructing it.
/// It can be useful if you want a zero-cost `RecurFn` implementation.
///
/// The function should have exactly one argument.
/// `impl Trait`s and generics are not supported.
///
/// # Examples
///
/// ```
/// use recur_fn::{recur_fn, RecurFn};
///
/// let fact = recur_fn!(fact(n: i32) -> i32 {
/// if n == 0 { 1 } else { n * fact(n - 1) }
/// });
/// assert_eq!(6, fact.call(3));
/// assert_eq!(0,
/// fact.body(|_| 0, 3));
/// ```
#[macro_export]
macro_rules! recur_fn {
($fn_name:ident ($arg_name:ident: $arg_type:ty) -> $output_type:ty $body:block) => {{
#[allow(non_camel_case_types)]
struct $fn_name {}
impl RecurFn<$arg_type, $output_type> for $fn_name {
#[inline]
fn body(
&self,
$fn_name: impl Fn($arg_type) -> $output_type,
$arg_name: $arg_type,
) -> $output_type {
$body
}
}
$fn_name {}
}};
}
#[cfg(test)]
mod tests;
| 25.250804 | 101 | 0.546543 |
18edb603c6cf1ffcc3be91ae2ca7a11cf5bc43b6 | 26,724 | //! Implements "Stacked Borrows". See <https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md>
//! for further information.
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::num::NonZeroU64;
use std::rc::Rc;
use rustc::hir::Mutability;
use rustc::mir::RetagKind;
use rustc::ty::{self, layout::Size};
use crate::{
AllocId, HelpersEvalContextExt, ImmTy, Immediate, InterpResult, MPlaceTy, MemoryKind,
MiriMemoryKind, PlaceTy, Pointer, RangeMap, TerminationInfo,
};
pub type PtrId = NonZeroU64;
pub type CallId = NonZeroU64;
pub type AllocExtra = Stacks;
/// Tracking pointer provenance
#[derive(Copy, Clone, Hash, PartialEq, Eq)]
pub enum Tag {
Tagged(PtrId),
Untagged,
}
impl fmt::Debug for Tag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Tag::Tagged(id) => write!(f, "<{}>", id),
Tag::Untagged => write!(f, "<untagged>"),
}
}
}
/// Indicates which permission is granted (by this item to some pointers)
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Permission {
/// Grants unique mutable access.
Unique,
/// Grants shared mutable access.
SharedReadWrite,
/// Grants shared read-only access.
SharedReadOnly,
/// Grants no access, but separates two groups of SharedReadWrite so they are not
/// all considered mutually compatible.
Disabled,
}
/// An item in the per-location borrow stack.
#[derive(Copy, Clone, Hash, PartialEq, Eq)]
pub struct Item {
/// The permission this item grants.
perm: Permission,
/// The pointers the permission is granted to.
tag: Tag,
/// An optional protector, ensuring the item cannot get popped until `CallId` is over.
protector: Option<CallId>,
}
impl fmt::Debug for Item {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "[{:?} for {:?}", self.perm, self.tag)?;
if let Some(call) = self.protector {
write!(f, " (call {})", call)?;
}
write!(f, "]")?;
Ok(())
}
}
/// Extra per-location state.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Stack {
/// Used *mostly* as a stack; never empty.
/// Invariants:
/// * Above a `SharedReadOnly` there can only be more `SharedReadOnly`.
/// * Except for `Untagged`, no tag occurs in the stack more than once.
borrows: Vec<Item>,
}
/// Extra per-allocation state.
#[derive(Clone, Debug)]
pub struct Stacks {
// Even reading memory can have effects on the stack, so we need a `RefCell` here.
stacks: RefCell<RangeMap<Stack>>,
// Pointer to global state
global: MemoryExtra,
}
/// Extra global state, available to the memory access hooks.
#[derive(Debug)]
pub struct GlobalState {
/// Next unused pointer ID (tag).
next_ptr_id: PtrId,
/// Table storing the "base" tag for each allocation.
/// The base tag is the one used for the initial pointer.
/// We need this in a separate table to handle cyclic statics.
base_ptr_ids: HashMap<AllocId, Tag>,
/// Next unused call ID (for protectors).
next_call_id: CallId,
/// Those call IDs corresponding to functions that are still running.
active_calls: HashSet<CallId>,
/// The id to trace in this execution run
tracked_pointer_tag: Option<PtrId>,
}
/// Memory extra state gives us interior mutable access to the global state.
pub type MemoryExtra = Rc<RefCell<GlobalState>>;
/// Indicates which kind of access is being performed.
#[derive(Copy, Clone, Hash, PartialEq, Eq)]
pub enum AccessKind {
Read,
Write,
}
impl fmt::Display for AccessKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
AccessKind::Read => write!(f, "read access"),
AccessKind::Write => write!(f, "write access"),
}
}
}
/// Indicates which kind of reference is being created.
/// Used by high-level `reborrow` to compute which permissions to grant to the
/// new pointer.
#[derive(Copy, Clone, Hash, PartialEq, Eq)]
pub enum RefKind {
/// `&mut` and `Box`.
Unique { two_phase: bool },
/// `&` with or without interior mutability.
Shared,
/// `*mut`/`*const` (raw pointers).
Raw { mutable: bool },
}
impl fmt::Display for RefKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
RefKind::Unique { two_phase: false } => write!(f, "unique"),
RefKind::Unique { two_phase: true } => write!(f, "unique (two-phase)"),
RefKind::Shared => write!(f, "shared"),
RefKind::Raw { mutable: true } => write!(f, "raw (mutable)"),
RefKind::Raw { mutable: false } => write!(f, "raw (constant)"),
}
}
}
/// Utilities for initialization and ID generation
impl GlobalState {
pub fn new(tracked_pointer_tag: Option<PtrId>) -> Self {
GlobalState {
next_ptr_id: NonZeroU64::new(1).unwrap(),
base_ptr_ids: HashMap::default(),
next_call_id: NonZeroU64::new(1).unwrap(),
active_calls: HashSet::default(),
tracked_pointer_tag,
}
}
fn new_ptr(&mut self) -> PtrId {
let id = self.next_ptr_id;
self.next_ptr_id = NonZeroU64::new(id.get() + 1).unwrap();
id
}
pub fn new_call(&mut self) -> CallId {
let id = self.next_call_id;
trace!("new_call: Assigning ID {}", id);
assert!(self.active_calls.insert(id));
self.next_call_id = NonZeroU64::new(id.get() + 1).unwrap();
id
}
pub fn end_call(&mut self, id: CallId) {
assert!(self.active_calls.remove(&id));
}
fn is_active(&self, id: CallId) -> bool {
self.active_calls.contains(&id)
}
pub fn static_base_ptr(&mut self, id: AllocId) -> Tag {
self.base_ptr_ids.get(&id).copied().unwrap_or_else(|| {
let tag = Tag::Tagged(self.new_ptr());
trace!("New allocation {:?} has base tag {:?}", id, tag);
self.base_ptr_ids.insert(id, tag).unwrap_none();
tag
})
}
}
// # Stacked Borrows Core Begin
/// We need to make at least the following things true:
///
/// U1: After creating a `Uniq`, it is at the top.
/// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
/// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
///
/// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
/// F2: If a write access happens, it pops the `SharedReadOnly`. This has three pieces:
/// F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly`
/// gets popped.
/// F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
/// F3: If an access happens with an `&` outside `UnsafeCell`,
/// it requires the `SharedReadOnly` to still be in the stack.
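//
// An illustrative walkthrough (informal, not normative) of how these rules
// interact for the borrow stack of a single location:
//
//     let mut local = 0u32;   // stack: [ Unique(local) ]
//     let x = &mut local;     // U1:    [ Unique(local), Unique(x) ]
//     let y = &*x;            // F1:    [ Unique(local), Unique(x), SharedReadOnly(y) ]
//     *x = 5;                 // F2a: the write via x pops SharedReadOnly(y)
//     let _val = *y;          // F3 violated: no item grants y read access -> UB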
/// Core relation on `Permission` to define which accesses are allowed
impl Permission {
/// This defines for a given permission, whether it permits the given kind of access.
fn grants(self, access: AccessKind) -> bool {
// Disabled grants nothing. Otherwise, all items grant read access, and except for SharedReadOnly they grant write access.
self != Permission::Disabled
&& (access == AccessKind::Read || self != Permission::SharedReadOnly)
}
}
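// For reference, `grants` boils down to this table (derived directly from the
// check above):
//
//     permission       | read | write
//     -----------------+------+------
//     Unique           | yes  | yes
//     SharedReadWrite  | yes  | yes
//     SharedReadOnly   | yes  | no
//     Disabled         | no   | no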
/// Core per-location operations: access, dealloc, reborrow.
impl<'tcx> Stack {
/// Find the item granting the given kind of access to the given tag, and return where
/// it is on the stack.
fn find_granting(&self, access: AccessKind, tag: Tag) -> Option<usize> {
self.borrows
.iter()
.enumerate() // we also need to know *where* in the stack
.rev() // search top-to-bottom
// Return permission of first item that grants access.
// We require a permission with the right tag, ensuring U3 and F3.
.find_map(
|(idx, item)| {
if tag == item.tag && item.perm.grants(access) { Some(idx) } else { None }
},
)
}
/// Find the first write-incompatible item above the given one --
    /// i.e., find the height to which the stack will be truncated when writing to `granting`.
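    ///
    /// For example (illustrative): with borrows
    /// `[Unique, SharedReadWrite, SharedReadWrite, SharedReadOnly]` and
    /// `granting = 1`, the `SharedReadWrite` at index 2 is still compatible,
    /// so this returns `3` -- the index of the first incompatible item.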
fn find_first_write_incompatible(&self, granting: usize) -> usize {
let perm = self.borrows[granting].perm;
match perm {
Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
Permission::Disabled => bug!("Cannot use Disabled for anything"),
// On a write, everything above us is incompatible.
Permission::Unique => granting + 1,
Permission::SharedReadWrite => {
                // The `SharedReadWrite` items *just* above us are compatible, so skip those.
let mut idx = granting + 1;
while let Some(item) = self.borrows.get(idx) {
if item.perm == Permission::SharedReadWrite {
// Go on.
idx += 1;
} else {
// Found first incompatible!
break;
}
}
idx
}
}
}
/// Check if the given item is protected.
fn check_protector(item: &Item, tag: Option<Tag>, global: &GlobalState) -> InterpResult<'tcx> {
if let Tag::Tagged(id) = item.tag {
if Some(id) == global.tracked_pointer_tag {
throw_machine_stop!(TerminationInfo::PoppedTrackedPointerTag(item.clone()));
}
}
if let Some(call) = item.protector {
if global.is_active(call) {
if let Some(tag) = tag {
throw_ub!(UbExperimental(format!(
"not granting access to tag {:?} because incompatible item is protected: {:?}",
tag, item
)));
} else {
throw_ub!(UbExperimental(format!(
"deallocating while item is protected: {:?}",
item
)));
}
}
}
Ok(())
}
/// Test if a memory `access` using pointer tagged `tag` is granted.
/// If yes, return the index of the item that granted it.
fn access(&mut self, access: AccessKind, tag: Tag, global: &GlobalState) -> InterpResult<'tcx> {
// Two main steps: Find granting item, remove incompatible items above.
// Step 1: Find granting item.
let granting_idx = self.find_granting(access, tag).ok_or_else(|| {
err_ub!(UbExperimental(format!(
"no item granting {} to tag {:?} found in borrow stack",
access, tag,
)))
})?;
// Step 2: Remove incompatible items above them. Make sure we do not remove protected
// items. Behavior differs for reads and writes.
if access == AccessKind::Write {
// Remove everything above the write-compatible items, like a proper stack. This makes sure read-only and unique
// pointers become invalid on write accesses (ensures F2a, and ensures U2 for write accesses).
let first_incompatible_idx = self.find_first_write_incompatible(granting_idx);
for item in self.borrows.drain(first_incompatible_idx..).rev() {
trace!("access: popping item {:?}", item);
Stack::check_protector(&item, Some(tag), global)?;
}
} else {
// On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
// The reason this is not following the stack discipline (by removing the first Unique and
// everything on top of it) is that in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement
// would pop the `Unique` from the reborrow of the first statement, and subsequently also pop the
// `SharedReadWrite` for `raw`.
// This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
// reference and use that.
// We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
for idx in ((granting_idx + 1)..self.borrows.len()).rev() {
let item = &mut self.borrows[idx];
if item.perm == Permission::Unique {
trace!("access: disabling item {:?}", item);
Stack::check_protector(item, Some(tag), global)?;
item.perm = Permission::Disabled;
}
}
}
// Done.
Ok(())
}
/// Deallocate a location: Like a write access, but also there must be no
/// active protectors at all because we will remove all items.
fn dealloc(&mut self, tag: Tag, global: &GlobalState) -> InterpResult<'tcx> {
// Step 1: Find granting item.
self.find_granting(AccessKind::Write, tag).ok_or_else(|| {
err_ub!(UbExperimental(format!(
"no item granting write access for deallocation to tag {:?} found in borrow stack",
tag,
)))
})?;
// Step 2: Remove all items. Also checks for protectors.
for item in self.borrows.drain(..).rev() {
Stack::check_protector(&item, None, global)?;
}
Ok(())
}
    /// Derive a new pointer from one with the given tag.
    /// Granting can be weak or strong: weak granting (used for `SharedReadWrite`) does not act
    /// as an access, and it adds the new item directly on top of the one it is derived
    /// from instead of all the way at the top of the stack.
fn grant(&mut self, derived_from: Tag, new: Item, global: &GlobalState) -> InterpResult<'tcx> {
// Figure out which access `perm` corresponds to.
let access =
if new.perm.grants(AccessKind::Write) { AccessKind::Write } else { AccessKind::Read };
// Now we figure out which item grants our parent (`derived_from`) this kind of access.
// We use that to determine where to put the new item.
let granting_idx = self.find_granting(access, derived_from)
.ok_or_else(|| err_ub!(UbExperimental(format!(
"trying to reborrow for {:?}, but parent tag {:?} does not have an appropriate item in the borrow stack", new.perm, derived_from,
))))?;
// Compute where to put the new item.
// Either way, we ensure that we insert the new item in a way such that between
// `derived_from` and the new one, there are only items *compatible with* `derived_from`.
let new_idx = if new.perm == Permission::SharedReadWrite {
assert!(
access == AccessKind::Write,
"this case only makes sense for stack-like accesses"
);
// SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
// access. Instead of popping the stack, we insert the item at the place the stack would
// be popped to (i.e., we insert it above all the write-compatible items).
// This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
self.find_first_write_incompatible(granting_idx)
} else {
// A "safe" reborrow for a pointer that actually expects some aliasing guarantees.
// Here, creating a reference actually counts as an access.
// This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
self.access(access, derived_from, global)?;
// We insert "as far up as possible": We know only compatible items are remaining
// on top of `derived_from`, and we want the new item at the top so that we
// get the strongest possible guarantees.
// This ensures U1 and F1.
self.borrows.len()
};
// Put the new item there. As an optimization, deduplicate if it is equal to one of its new neighbors.
if self.borrows[new_idx - 1] == new || self.borrows.get(new_idx) == Some(&new) {
// Optimization applies, done.
trace!("reborrow: avoiding adding redundant item {:?}", new);
} else {
trace!("reborrow: adding item {:?}", new);
self.borrows.insert(new_idx, new);
}
Ok(())
}
}
// # Stacked Borrows Core End
/// Map per-stack operations to higher-level per-location-range operations.
impl<'tcx> Stacks {
/// Creates new stack with initial tag.
fn new(size: Size, perm: Permission, tag: Tag, extra: MemoryExtra) -> Self {
let item = Item { perm, tag, protector: None };
let stack = Stack { borrows: vec![item] };
Stacks { stacks: RefCell::new(RangeMap::new(size, stack)), global: extra }
}
/// Call `f` on every stack in the range.
fn for_each(
&self,
ptr: Pointer<Tag>,
size: Size,
f: impl Fn(&mut Stack, &GlobalState) -> InterpResult<'tcx>,
) -> InterpResult<'tcx> {
let global = self.global.borrow();
let mut stacks = self.stacks.borrow_mut();
for stack in stacks.iter_mut(ptr.offset, size) {
f(stack, &*global)?;
}
Ok(())
}
}
/// Glue code to connect with Miri Machine Hooks
impl Stacks {
pub fn new_allocation(
id: AllocId,
size: Size,
extra: MemoryExtra,
kind: MemoryKind<MiriMemoryKind>,
) -> (Self, Tag) {
let (tag, perm) = match kind {
// New unique borrow. This tag is not accessible by the program,
// so it will only ever be used when using the local directly (i.e.,
// not through a pointer). That is, whenever we directly write to a local, this will pop
// everything else off the stack, invalidating all previous pointers,
// and in particular, *all* raw pointers.
MemoryKind::Stack => (Tag::Tagged(extra.borrow_mut().new_ptr()), Permission::Unique),
// Static memory can be referenced by "global" pointers from `tcx`.
// Thus we call `static_base_ptr` such that the global pointers get the same tag
// as what we use here.
// The base pointer is not unique, so the base permission is `SharedReadWrite`.
MemoryKind::Machine(MiriMemoryKind::Static) =>
(extra.borrow_mut().static_base_ptr(id), Permission::SharedReadWrite),
// Everything else we handle entirely untagged for now.
// FIXME: experiment with more precise tracking.
_ => (Tag::Untagged, Permission::SharedReadWrite),
};
(Stacks::new(size, perm, tag, extra), tag)
}
#[inline(always)]
pub fn memory_read<'tcx>(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
trace!("read access with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
self.for_each(ptr, size, |stack, global| {
stack.access(AccessKind::Read, ptr.tag, global)?;
Ok(())
})
}
#[inline(always)]
pub fn memory_written<'tcx>(&mut self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
trace!("write access with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
self.for_each(ptr, size, |stack, global| {
stack.access(AccessKind::Write, ptr.tag, global)?;
Ok(())
})
}
#[inline(always)]
pub fn memory_deallocated<'tcx>(
&mut self,
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
trace!("deallocation with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
self.for_each(ptr, size, |stack, global| stack.dealloc(ptr.tag, global))
}
}
/// Retagging/reborrowing. There is some policy in here, such as which permissions
/// to grant for which references, and when to add protectors.
impl<'mir, 'tcx> EvalContextPrivExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
fn reborrow(
&mut self,
place: MPlaceTy<'tcx, Tag>,
size: Size,
kind: RefKind,
new_tag: Tag,
protect: bool,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let protector = if protect { Some(this.frame().extra.call_id) } else { None };
let ptr = place.ptr.assert_ptr();
trace!(
"reborrow: {} reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
kind,
new_tag,
ptr.tag,
place.layout.ty,
ptr.erase_tag(),
size.bytes()
);
// Get the allocation. It might not be mutable, so we cannot use `get_mut`.
let extra = &this.memory.get_raw(ptr.alloc_id)?.extra;
let stacked_borrows =
extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
// Update the stacks.
// Make sure that raw pointers and mutable shared references are reborrowed "weak":
// There could be existing unique pointers reborrowed from them that should remain valid!
let perm = match kind {
RefKind::Unique { two_phase: false } => Permission::Unique,
RefKind::Unique { two_phase: true } => Permission::SharedReadWrite,
RefKind::Raw { mutable: true } => Permission::SharedReadWrite,
RefKind::Shared | RefKind::Raw { mutable: false } => {
// Shared references and *const are a whole different kind of game, the
// permission is not uniform across the entire range!
// We need a frozen-sensitive reborrow.
return this.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
// We are only ever `SharedReadOnly` inside the frozen bits.
let perm = if frozen {
Permission::SharedReadOnly
} else {
Permission::SharedReadWrite
};
let item = Item { perm, tag: new_tag, protector };
stacked_borrows.for_each(cur_ptr, size, |stack, global| {
stack.grant(cur_ptr.tag, item, global)
})
});
}
};
let item = Item { perm, tag: new_tag, protector };
stacked_borrows.for_each(ptr, size, |stack, global| stack.grant(ptr.tag, item, global))
}
    /// Retags an individual pointer, returning the retagged version.
    /// `kind` indicates which kind of reference (or raw pointer) is being created.
fn retag_reference(
&mut self,
val: ImmTy<'tcx, Tag>,
kind: RefKind,
protect: bool,
) -> InterpResult<'tcx, Immediate<Tag>> {
let this = self.eval_context_mut();
// We want a place for where the ptr *points to*, so we get one.
let place = this.ref_to_mplace(val)?;
let size = this
.size_and_align_of_mplace(place)?
.map(|(size, _)| size)
.unwrap_or_else(|| place.layout.size);
// We can see dangling ptrs in here e.g. after a Box's `Unique` was
// updated using "self.0 = ..." (can happen in Box::from_raw); see miri#1050.
let place = this.mplace_access_checked(place)?;
if size == Size::ZERO {
// Nothing to do for ZSTs.
return Ok(*val);
}
// Compute new borrow.
let new_tag = match kind {
// Give up tracking for raw pointers.
// FIXME: Experiment with more precise tracking. Blocked on `&raw`
// because `Rc::into_raw` currently creates intermediate references,
// breaking `Rc::from_raw`.
RefKind::Raw { .. } => Tag::Untagged,
            // All other pointers are properly tracked.
_ => Tag::Tagged(this.memory.extra.stacked_borrows.borrow_mut().new_ptr()),
};
// Reborrow.
this.reborrow(place, size, kind, new_tag, protect)?;
let new_place = place.replace_tag(new_tag);
// Return new pointer.
Ok(new_place.to_ref())
}
}
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
fn retag(&mut self, kind: RetagKind, place: PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
// Determine mutability and whether to add a protector.
// Cannot use `builtin_deref` because that reports *immutable* for `Box`,
// making it useless.
fn qualify(ty: ty::Ty<'_>, kind: RetagKind) -> Option<(RefKind, bool)> {
match ty.kind {
// References are simple.
ty::Ref(_, _, Mutability::Mut) => Some((
RefKind::Unique { two_phase: kind == RetagKind::TwoPhase },
kind == RetagKind::FnEntry,
)),
ty::Ref(_, _, Mutability::Not) =>
Some((RefKind::Shared, kind == RetagKind::FnEntry)),
// Raw pointers need to be enabled.
ty::RawPtr(tym) if kind == RetagKind::Raw =>
Some((RefKind::Raw { mutable: tym.mutbl == Mutability::Mut }, false)),
// Boxes do not get a protector: protectors reflect that references outlive the call
// they were passed in to; that's just not the case for boxes.
ty::Adt(..) if ty.is_box() => Some((RefKind::Unique { two_phase: false }, false)),
_ => None,
}
}
// We only reborrow "bare" references/boxes.
// Not traversing into fields helps with <https://github.com/rust-lang/unsafe-code-guidelines/issues/125>,
// but might also cost us optimization and analyses. We will have to experiment more with this.
if let Some((mutbl, protector)) = qualify(place.layout.ty, kind) {
// Fast path.
let val = this.read_immediate(this.place_to_op(place)?)?;
let val = this.retag_reference(val, mutbl, protector)?;
this.write_immediate(val, place)?;
}
Ok(())
}
}
| 42.018868 | 145 | 0.585691 |
d937763c78be6d86bd64dd3333e1b4fa232f41e7 | 25,361 | // Copyright (c) Microsoft. All rights reserved.
use std::cmp::Ordering;
use std::path::{Path, PathBuf};
use regex::Regex;
use url::Url;
use url_serde;
use crate::crypto::MemoryKey;
use crate::error::{Error, ErrorKind};
use crate::module::ModuleSpec;
use crate::DEFAULT_AUTO_GENERATED_CA_LIFETIME_DAYS;
const DEVICEID_KEY: &str = "DeviceId";
const HOSTNAME_KEY: &str = "HostName";
const SHAREDACCESSKEY_KEY: &str = "SharedAccessKey";
const DEVICEID_REGEX: &str = r"^[A-Za-z0-9\-:.+%_#*?!(),=@;$']{1,128}$";
const HOSTNAME_REGEX: &str = r"^[a-zA-Z0-9_\-\.]+$";
/// This is the default connection string
pub const DEFAULT_CONNECTION_STRING: &str = "<ADD DEVICE CONNECTION STRING HERE>";
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
#[serde(rename_all = "lowercase")]
pub struct ManualX509Auth {
iothub_hostname: String,
device_id: String,
#[serde(with = "url_serde")]
identity_cert: Url,
#[serde(with = "url_serde")]
identity_pk: Url,
}
impl ManualX509Auth {
pub fn iothub_hostname(&self) -> &str {
&self.iothub_hostname
}
pub fn device_id(&self) -> &str {
&self.device_id
}
pub fn identity_cert(&self) -> Result<PathBuf, Error> {
get_path_from_uri(
&self.identity_cert,
"provisioning.authentication.identity_cert",
)
}
pub fn identity_pk(&self) -> Result<PathBuf, Error> {
get_path_from_uri(&self.identity_pk, "provisioning.authentication.identity_pk")
}
pub fn identity_pk_uri(&self) -> Result<&Url, Error> {
if is_supported_uri(&self.identity_pk) {
Ok(&self.identity_pk)
} else {
Err(Error::from(ErrorKind::UnsupportedSettingsUri(
self.identity_pk.to_string(),
"provisioning.authentication.identity_pk",
)))
}
}
pub fn identity_cert_uri(&self) -> Result<&Url, Error> {
if is_supported_uri(&self.identity_cert) {
Ok(&self.identity_cert)
} else {
Err(Error::from(ErrorKind::UnsupportedSettingsUri(
self.identity_cert.to_string(),
"provisioning.authentication.identity_cert",
)))
}
}
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
#[serde(rename_all = "lowercase")]
pub struct ManualDeviceConnectionString {
device_connection_string: String,
}
impl ManualDeviceConnectionString {
pub fn new(device_connection_string: String) -> Self {
ManualDeviceConnectionString {
device_connection_string,
}
}
pub fn device_connection_string(&self) -> &str {
&self.device_connection_string
}
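    /// Parses a connection string of the form
    /// `HostName=<hub hostname>;DeviceId=<device id>;SharedAccessKey=<base64 key>`
    /// into its key, device id, and hub components. For example (hypothetical
    /// values): `HostName=example.azure-devices.net;DeviceId=device1;SharedAccessKey=aGVsbG8=`.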
pub fn parse_device_connection_string(&self) -> Result<(MemoryKey, String, String), Error> {
if self.device_connection_string.is_empty() {
return Err(Error::from(ErrorKind::ConnectionStringEmpty));
}
if self.device_connection_string == DEFAULT_CONNECTION_STRING {
return Err(Error::from(ErrorKind::ConnectionStringNotConfigured(
if cfg!(windows) {
"https://aka.ms/iot-edge-configure-windows"
} else {
"https://aka.ms/iot-edge-configure-linux"
},
)));
}
let mut key = None;
let mut device_id = None;
let mut hub = None;
let parts: Vec<&str> = self.device_connection_string.split(';').collect();
for p in parts {
let s: Vec<&str> = p.split('=').collect();
match s[0] {
SHAREDACCESSKEY_KEY => key = Some(s[1].to_string()),
DEVICEID_KEY => device_id = Some(s[1].to_string()),
HOSTNAME_KEY => hub = Some(s[1].to_string()),
_ => (), // Ignore extraneous component in the connection string
}
}
let key = key.ok_or(ErrorKind::ConnectionStringMissingRequiredParameter(
SHAREDACCESSKEY_KEY,
))?;
if key.is_empty() {
return Err(Error::from(ErrorKind::ConnectionStringMalformedParameter(
SHAREDACCESSKEY_KEY,
)));
}
let key = MemoryKey::new(
base64::decode(&key)
.map_err(|_| ErrorKind::ConnectionStringMalformedParameter(SHAREDACCESSKEY_KEY))?,
);
let device_id =
device_id.ok_or(ErrorKind::ConnectionStringMalformedParameter(DEVICEID_KEY))?;
let device_id_regex =
Regex::new(DEVICEID_REGEX).expect("This hard-coded regex is expected to be valid.");
if !device_id_regex.is_match(&device_id) {
return Err(Error::from(ErrorKind::ConnectionStringMalformedParameter(
DEVICEID_KEY,
)));
}
let hub = hub.ok_or(ErrorKind::ConnectionStringMissingRequiredParameter(
HOSTNAME_KEY,
))?;
let hub_regex =
Regex::new(HOSTNAME_REGEX).expect("This hard-coded regex is expected to be valid.");
if !hub_regex.is_match(&hub) {
return Err(Error::from(ErrorKind::ConnectionStringMalformedParameter(
HOSTNAME_KEY,
)));
}
Ok((key, device_id.to_owned(), hub.to_owned()))
}
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
#[serde(tag = "method")]
#[serde(rename_all = "lowercase")]
pub enum ManualAuthMethod {
#[serde(rename = "device_connection_string")]
DeviceConnectionString(ManualDeviceConnectionString),
X509(ManualX509Auth),
}
#[derive(Clone, Debug, serde_derive::Serialize)]
#[serde(rename_all = "lowercase")]
pub struct Manual {
authentication: ManualAuthMethod,
}
impl<'de> serde::Deserialize<'de> for Manual {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(Debug, serde_derive::Deserialize)]
struct Inner {
#[serde(skip_serializing_if = "Option::is_none")]
device_connection_string: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
authentication: Option<ManualAuthMethod>,
}
let value: Inner = serde::Deserialize::deserialize(deserializer)?;
let authentication = match (value.device_connection_string, value.authentication) {
(Some(_), Some(_)) => {
return Err(serde::de::Error::custom(
"Only one of provisioning.device_connection_string or provisioning.authentication must be set in the config.yaml.",
));
}
(Some(cs), None) => {
ManualAuthMethod::DeviceConnectionString(ManualDeviceConnectionString::new(cs))
}
(None, Some(auth)) => auth,
(None, None) => {
return Err(serde::de::Error::custom(
"One of provisioning.device_connection_string or provisioning.authentication must be set in the config.yaml.",
));
}
};
Ok(Manual { authentication })
}
}
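// Illustrative config.yaml fragments accepted by the deserializer above
// (exactly one of the two forms may be present; the values are placeholders):
//
//   provisioning:
//     source: "manual"
//     device_connection_string: "HostName=...;DeviceId=...;SharedAccessKey=..."
//
//   provisioning:
//     source: "manual"
//     authentication:
//       method: "device_connection_string"
//       device_connection_string: "HostName=...;DeviceId=...;SharedAccessKey=..."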
impl Manual {
pub fn new(authentication: ManualAuthMethod) -> Self {
Manual { authentication }
}
pub fn authentication_method(&self) -> &ManualAuthMethod {
&self.authentication
}
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
#[serde(tag = "method")]
#[serde(rename_all = "lowercase")]
pub enum AttestationMethod {
Tpm(TpmAttestationInfo),
#[serde(rename = "symmetric_key")]
SymmetricKey(SymmetricKeyAttestationInfo),
X509(X509AttestationInfo),
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
#[serde(rename_all = "lowercase")]
pub struct TpmAttestationInfo {
registration_id: String,
}
impl TpmAttestationInfo {
pub fn new(registration_id: String) -> Self {
TpmAttestationInfo { registration_id }
}
pub fn registration_id(&self) -> &str {
&self.registration_id
}
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
#[serde(rename_all = "lowercase")]
pub struct SymmetricKeyAttestationInfo {
registration_id: String,
symmetric_key: String,
}
impl SymmetricKeyAttestationInfo {
pub fn registration_id(&self) -> &str {
&self.registration_id
}
pub fn symmetric_key(&self) -> &str {
&self.symmetric_key
}
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
#[serde(rename_all = "lowercase")]
pub struct X509AttestationInfo {
#[serde(skip_serializing_if = "Option::is_none")]
registration_id: Option<String>,
#[serde(with = "url_serde")]
identity_cert: Url,
#[serde(with = "url_serde")]
identity_pk: Url,
}
impl X509AttestationInfo {
pub fn identity_cert(&self) -> Result<PathBuf, Error> {
get_path_from_uri(
&self.identity_cert,
"provisioning.attestation.identity_cert",
)
}
pub fn identity_pk(&self) -> Result<PathBuf, Error> {
get_path_from_uri(&self.identity_pk, "provisioning.attestation.identity_pk")
}
pub fn identity_pk_uri(&self) -> Result<&Url, Error> {
if is_supported_uri(&self.identity_pk) {
Ok(&self.identity_pk)
} else {
Err(Error::from(ErrorKind::UnsupportedSettingsUri(
self.identity_pk.to_string(),
"provisioning.attestation.identity_pk",
)))
}
}
pub fn identity_cert_uri(&self) -> Result<&Url, Error> {
if is_supported_uri(&self.identity_cert) {
Ok(&self.identity_cert)
} else {
Err(Error::from(ErrorKind::UnsupportedSettingsUri(
self.identity_cert.to_string(),
"provisioning.attestation.identity_cert",
)))
}
}
pub fn registration_id(&self) -> Option<&str> {
self.registration_id.as_ref().map(AsRef::as_ref)
}
}
#[derive(Clone, Debug, serde_derive::Serialize)]
pub struct Dps {
#[serde(with = "url_serde")]
global_endpoint: Url,
scope_id: String,
attestation: AttestationMethod,
}
impl<'de> serde::Deserialize<'de> for Dps {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(Debug, serde_derive::Deserialize)]
struct Inner {
#[serde(with = "url_serde")]
global_endpoint: Url,
scope_id: String,
registration_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
attestation: Option<AttestationMethod>,
}
let value: Inner = serde::Deserialize::deserialize(deserializer)?;
let attestation = match (value.attestation, value.registration_id) {
(Some(_att), Some(_)) => {
return Err(serde::de::Error::custom(
"Provisioning registration_id has to be set only in attestation",
));
}
(Some(att), None) => att,
(None, Some(reg_id)) => {
AttestationMethod::Tpm(TpmAttestationInfo::new(reg_id.to_string()))
}
(None, None) => {
return Err(serde::de::Error::custom(
"Provisioning registration_id has to be set",
));
}
};
Ok(Dps {
global_endpoint: value.global_endpoint,
scope_id: value.scope_id,
attestation,
})
}
}
impl Dps {
pub fn global_endpoint(&self) -> &Url {
&self.global_endpoint
}
pub fn scope_id(&self) -> &str {
&self.scope_id
}
pub fn attestation(&self) -> &AttestationMethod {
&self.attestation
}
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
#[serde(rename_all = "lowercase")]
pub struct External {
#[serde(with = "url_serde")]
endpoint: Url,
}
impl External {
pub fn new(endpoint: Url) -> Self {
External { endpoint }
}
pub fn endpoint(&self) -> &Url {
&self.endpoint
}
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
#[serde(tag = "source")]
#[serde(rename_all = "lowercase")]
pub enum Provisioning {
Manual(Box<Manual>),
Dps(Box<Dps>),
External(External),
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
pub struct Connect {
#[serde(with = "url_serde")]
workload_uri: Url,
#[serde(with = "url_serde")]
management_uri: Url,
}
impl Connect {
pub fn workload_uri(&self) -> &Url {
&self.workload_uri
}
pub fn management_uri(&self) -> &Url {
&self.management_uri
}
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
pub struct Listen {
#[serde(with = "url_serde")]
workload_uri: Url,
#[serde(with = "url_serde")]
management_uri: Url,
}
impl Listen {
pub fn workload_uri(&self) -> &Url {
&self.workload_uri
}
pub fn management_uri(&self) -> &Url {
&self.management_uri
}
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
pub struct Certificates {
#[serde(flatten)]
device_cert: Option<DeviceCertificate>,
auto_generated_ca_lifetime_days: u16,
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
pub struct DeviceCertificate {
device_ca_cert: String,
device_ca_pk: String,
trusted_ca_certs: String,
}
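/// Only `file://` URIs without a port or query string are supported, and the
/// host must be absent or `localhost`. For example (illustrative),
/// `file:///tmp/cert.pem` and `file://localhost/tmp/cert.pem` are accepted,
/// while `https://localhost/tmp/cert.pem` and `file://otherhost/tmp/cert.pem`
/// are rejected.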
fn is_supported_uri(uri: &Url) -> bool {
if uri.scheme() == "file" && uri.port().is_none() && uri.query().is_none() {
if let Some(host) = uri.host_str() {
return host == "localhost";
}
return true;
}
false
}
fn get_path_from_uri(uri: &Url, setting_name: &'static str) -> Result<PathBuf, Error> {
if is_supported_uri(&uri) {
let path = uri
.to_file_path()
.map_err(|()| ErrorKind::InvalidSettingsUriFilePath(uri.to_string(), setting_name))?;
Ok(path)
} else {
Err(Error::from(ErrorKind::UnsupportedSettingsFileUri(
uri.to_string(),
setting_name,
)))
}
}
fn convert_to_path(maybe_path: &str, setting_name: &'static str) -> Result<PathBuf, Error> {
if let Ok(file_uri) = Url::from_file_path(maybe_path) {
// maybe_path was specified as a valid absolute path not a URI
get_path_from_uri(&file_uri, setting_name)
} else {
// maybe_path is a URI or a relative path
if let Ok(uri) = Url::parse(maybe_path) {
get_path_from_uri(&uri, setting_name)
} else {
Ok(PathBuf::from(maybe_path))
}
}
}
fn convert_to_uri(maybe_uri: &str, setting_name: &'static str) -> Result<Url, Error> {
if let Ok(uri) = Url::parse(maybe_uri) {
// maybe_uri was specified as a URI
if is_supported_uri(&uri) {
Ok(uri)
} else {
Err(Error::from(ErrorKind::UnsupportedSettingsUri(
maybe_uri.to_owned(),
setting_name,
)))
}
} else {
// maybe_uri was specified as a valid path not a URI
Url::from_file_path(maybe_uri)
.map(|uri| {
if is_supported_uri(&uri) {
Ok(uri)
} else {
Err(Error::from(ErrorKind::UnsupportedSettingsUri(
maybe_uri.to_owned(),
setting_name,
)))
}
})
.map_err(|()| ErrorKind::InvalidSettingsUri(maybe_uri.to_owned(), setting_name))?
}
}
impl DeviceCertificate {
pub fn device_ca_cert(&self) -> Result<PathBuf, Error> {
convert_to_path(&self.device_ca_cert, "certificates.device_ca_cert")
}
pub fn device_ca_pk(&self) -> Result<PathBuf, Error> {
convert_to_path(&self.device_ca_pk, "certificates.device_ca_pk")
}
pub fn trusted_ca_certs(&self) -> Result<PathBuf, Error> {
convert_to_path(&self.trusted_ca_certs, "certificates.trusted_ca_certs")
}
pub fn device_ca_cert_uri(&self) -> Result<Url, Error> {
convert_to_uri(&self.device_ca_cert, "certificates.device_ca_cert")
}
pub fn device_ca_pk_uri(&self) -> Result<Url, Error> {
convert_to_uri(&self.device_ca_pk, "certificates.device_ca_pk")
}
pub fn trusted_ca_certs_uri(&self) -> Result<Url, Error> {
convert_to_uri(&self.trusted_ca_certs, "certificates.trusted_ca_certs")
}
}
impl Certificates {
pub fn device_cert(&self) -> Option<&DeviceCertificate> {
self.device_cert.as_ref()
}
pub fn auto_generated_ca_lifetime_seconds(&self) -> u64 {
// Convert days to seconds (86,400 seconds per day)
u64::from(self.auto_generated_ca_lifetime_days) * 86_400
}
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
#[serde(untagged)]
pub enum RetryLimit {
Infinite,
Num(u32),
}
impl RetryLimit {
pub fn compare(&self, right: u32) -> Ordering {
match self {
RetryLimit::Infinite => Ordering::Greater,
RetryLimit::Num(n) => n.cmp(&right),
}
}
}
impl Default for RetryLimit {
fn default() -> Self {
RetryLimit::Infinite
}
}
#[derive(Clone, Debug, Default, serde_derive::Deserialize, serde_derive::Serialize)]
pub struct WatchdogSettings {
#[serde(default)]
max_retries: RetryLimit,
}
impl WatchdogSettings {
pub fn max_retries(&self) -> &RetryLimit {
&self.max_retries
}
}
pub trait RuntimeSettings {
type Config;
fn provisioning(&self) -> &Provisioning;
fn agent(&self) -> &ModuleSpec<Self::Config>;
fn agent_mut(&mut self) -> &mut ModuleSpec<Self::Config>;
fn hostname(&self) -> &str;
fn connect(&self) -> &Connect;
fn listen(&self) -> &Listen;
fn homedir(&self) -> &Path;
fn certificates(&self) -> &Certificates;
fn watchdog(&self) -> &WatchdogSettings;
}
#[derive(Clone, Debug, serde_derive::Deserialize, serde_derive::Serialize)]
pub struct Settings<T> {
provisioning: Provisioning,
agent: ModuleSpec<T>,
hostname: String,
connect: Connect,
listen: Listen,
homedir: PathBuf,
certificates: Option<Certificates>,
#[serde(default)]
watchdog: WatchdogSettings,
}
impl<T> RuntimeSettings for Settings<T>
where
T: Clone,
{
type Config = T;
fn provisioning(&self) -> &Provisioning {
&self.provisioning
}
fn agent(&self) -> &ModuleSpec<T> {
&self.agent
}
fn agent_mut(&mut self) -> &mut ModuleSpec<T> {
&mut self.agent
}
fn hostname(&self) -> &str {
&self.hostname
}
fn connect(&self) -> &Connect {
&self.connect
}
fn listen(&self) -> &Listen {
&self.listen
}
fn homedir(&self) -> &Path {
&self.homedir
}
// Certificates is left as an option for backward compat
fn certificates(&self) -> &Certificates {
match &self.certificates {
None => &Certificates {
device_cert: None,
auto_generated_ca_lifetime_days: DEFAULT_AUTO_GENERATED_CA_LIFETIME_DAYS,
},
Some(c) => c,
}
}
fn watchdog(&self) -> &WatchdogSettings {
&self.watchdog
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_convert_to_path() {
if cfg!(windows) {
assert_eq!(
r"..\sample.txt",
convert_to_path(r"..\sample.txt", "test")
.unwrap()
.to_str()
.unwrap()
);
let expected_path = r"C:\temp\sample.txt";
assert_eq!(
expected_path,
convert_to_path(r"C:\temp\sample.txt", "test")
.unwrap()
.to_str()
.unwrap()
);
assert_eq!(
expected_path,
convert_to_path("file:///C:/temp/sample.txt", "test")
.unwrap()
.to_str()
.unwrap()
);
assert_eq!(
expected_path,
convert_to_path("file://localhost/C:/temp/sample.txt", "test")
.unwrap()
.to_str()
.unwrap()
);
assert_eq!(
expected_path,
convert_to_path("file://localhost/C:/temp/../temp/sample.txt", "test")
.unwrap()
.to_str()
.unwrap()
);
// oddly this works because the host is null since local drive is specified
assert_eq!(
expected_path,
convert_to_path("file://deadhost/C:/temp/sample.txt", "test")
.unwrap()
.to_str()
.unwrap()
);
convert_to_path("file://deadhost/temp/sample.txt", "test")
.expect_err("Non localhost host specified");
convert_to_path("https:///C:/temp/sample.txt", "test")
.expect_err("Non file scheme specified");
} else {
assert_eq!(
"./sample.txt",
convert_to_path("./sample.txt", "test")
.unwrap()
.to_str()
.unwrap()
);
let expected_path = "/tmp/sample.txt";
assert_eq!(
expected_path,
convert_to_path("/tmp/sample.txt", "test")
.unwrap()
.to_str()
.unwrap()
);
assert_eq!(
expected_path,
convert_to_path("file:///tmp/sample.txt", "test")
.unwrap()
.to_str()
.unwrap()
);
assert_eq!(
expected_path,
convert_to_path("file://localhost/tmp/sample.txt", "test")
.unwrap()
.to_str()
.unwrap()
);
assert_eq!(
expected_path,
convert_to_path("file:///tmp/../tmp/sample.txt", "test")
.unwrap()
.to_str()
.unwrap()
);
convert_to_path("file://deadhost/tmp/sample.txt", "test")
.expect_err("Non localhost host specified");
convert_to_path("https://localhost/tmp/sample.txt", "test")
.expect_err("Non file scheme specified");
}
}
#[test]
fn test_convert_to_uri() {
if cfg!(windows) {
let expected_uri_str = "file:///C:/temp/sample.txt";
let expected_uri = Url::parse(expected_uri_str).unwrap();
assert_eq!(
expected_uri,
convert_to_uri("file:///C:/temp/sample.txt", "test").unwrap()
);
assert_eq!(
expected_uri,
convert_to_uri("file://localhost/C:/temp/sample.txt", "test").unwrap()
);
assert_eq!(
expected_uri,
convert_to_uri("file://localhost/C:/temp/../temp/sample.txt", "test").unwrap()
);
// oddly this works because the host is null since local drive is specified
assert_eq!(
expected_uri,
convert_to_uri("file://deadhost/C:/temp/sample.txt", "test").unwrap()
);
convert_to_uri("file://deadhost/temp/sample.txt", "test")
.expect_err("Non localhost host specified");
convert_to_uri("file://deadhost/temp/sample.txt", "test")
.expect_err("Non file scheme specified");
convert_to_uri("../tmp/../tmp/sample.txt", "test")
.expect_err("Non absolute path specified");
} else {
let expected_uri_str = "file:///tmp/sample.txt";
let expected_uri = Url::parse(expected_uri_str).unwrap();
assert_eq!(
expected_uri,
convert_to_uri("file:///tmp/sample.txt", "test").unwrap()
);
assert_eq!(
expected_uri,
convert_to_uri("file://localhost/tmp/sample.txt", "test").unwrap()
);
assert_eq!(
expected_uri,
convert_to_uri("file:///tmp/../tmp/sample.txt", "test").unwrap()
);
convert_to_uri("https://localhost/tmp/sample.txt", "test")
.expect_err("Non absolute path specified");
assert_eq!(
expected_uri,
convert_to_uri("/tmp/sample.txt", "test").unwrap()
);
convert_to_uri("../tmp/../tmp/sample.txt", "test")
.expect_err("Non absolute path specified");
convert_to_uri("file://deadhost/tmp/sample.txt", "test")
.expect_err("Non localhost host specified");
}
}
}
| 30.518652 | 139 | 0.568392 |
4a70c9c2f7cd3bf6d3cc563e6b2c5a86b3a0079e | 5,353 | //! [](https://docs.rs/slog_unwraps)
//! 
//! [](https://travis-ci.org/najamelan/slog_unwraps)
//! [](https://www.repostatus.org/#active)
//!
//! Syntactic sugar to slog an error before [unwrapping](https://doc.rust-lang.org/std/result/enum.Result.html#method.unwrap).
//! It will add caller file and line information to the log statement so you don't have to turn on RUST_BACKTRACE to see what
//! went wrong; note that this only makes sense in debug mode. In release mode this information will either be missing or unreliable.
//!
//! At first I had an `expects` function as well to be able to add context, but I really think you should use the
//! [`failure` crate](https://docs.rs/failure), which provides a `context` method on errors, which is much cleaner, so `expects`
//! no longer exists. If you don't want to use `failure`, you will have to make sure your errors display sensible messages.
//!
//! ## Example
//!
//! run with `cargo run --example basic`
//!
//! ```rust should_panic
//! use
//! {
//! std :: { fs::File } ,
//! slog :: { Drain, Level, Logger, o, crit } ,
//! slog_term :: { FullFormat, PlainSyncDecorator } ,
//! slog_unwraps :: { ResultExt } ,
//! };
//!
//! fn main()
//! {
//! let plain = PlainSyncDecorator::new( std::io::stderr() ) ;
//! let log = Logger::root( FullFormat::new( plain ).build().fuse(), o!() ) ;
//!
//!
//! // This will output (in one line, wrapped here for readability):
//! //
//! // Mar 08 18:13:52.034 CRIT PANIC - fn `main` calls `unwraps` @ examples/basic.rs:20
//! // -> Error: No such file or directory (os error 2)
//! //
//! // and then will call unwrap for you
//! //
//! let f = File::open( "dont.exist" );
//! let _file = f.unwraps( &log );
//!
//!
//! // This is equivalent. Of course you can do something else with the result after logging
//! // rather than unwrapping. This only logs if the result is an error.
//! //
//! let g = File::open( "dont.exist" );
//! let _file = g.log( &log, Level::Critical ).unwrap();
//!
//! // Without this crate, every time you want to unwrap, you would write something like:
//! //
//! let h = File::open( "dont.exist" );
//!
//! let _file = match h
//! {
//! Ok ( f ) => f,
//! Err( e ) => { crit!( log, "{}", e ); panic!() }
//! };
//! }
//! ```
//!
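//! ## Using with the `failure` crate
//!
//! An illustrative sketch (not part of this crate's API; assumes the `failure`
//! crate is a dependency) of adding context before logging. `failure`'s
//! `ResultExt::context` wraps the error, and the resulting `Context` still
//! implements `Display + Debug`, so `unwraps` works on it:
//!
//! ```rust ignore
//! use failure::ResultExt as _;
//! use slog_unwraps::ResultExt as _;
//!
//! let f = std::fs::File::open( "dont.exist" ).context( "reading the settings file" );
//! let _file = f.unwraps( &log );
//! ```
//!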
use
{
std::fmt :: { Debug, Display },
backtrace :: { Backtrace },
regex :: { Regex },
slog :: { Logger, trace, debug, info, warn, error, crit, Level },
};
/// Extends the [std::result::Result](https://doc.rust-lang.org/std/result/enum.Result.html) type with extra methods to ease logging of errors.
///
pub trait ResultExt<T, E>
where E: Display + Debug
{
/// Logs the error to the provided logger before unwrapping.
///
fn unwraps( self, log: &Logger ) -> T;
/// Logs a potential error in the result and returns the result intact.
///
fn log ( self, log: &Logger, lvl: slog::Level ) -> Result<T,E>;
}
impl<T, E> ResultExt<T, E> for Result<T, E> where E: Display + Debug
{
fn unwraps( self, log: &Logger ) -> T
{
self.map_err( |e|
{
crit!( log, "{} -> Error: {}" , demangle( "unwraps" ), e );
e
}).unwrap()
}
fn log( self, log: &Logger, lvl: Level ) -> Result<T, E>
{
self.map_err( |e|
{
match lvl
{
Level::Trace => trace!( log, "{}", e ),
Level::Debug => debug!( log, "{}", e ),
Level::Info => info! ( log, "{}", e ),
Level::Warning => warn! ( log, "{}", e ),
Level::Error => error!( log, "{}", e ),
Level::Critical => crit! ( log, "{}", e ),
}
e
})
}
}
// Demangle the API of the backtrace crate!
//
// Returns the caller function name + file:lineno for logging in ResultExtSlog
//
fn demangle( which: &str ) -> String
{
let empty = String::with_capacity(0);
let bt = Backtrace::new();
let frames = bt.frames();
let frame = &frames.get( 4 );
	if let Some( frame ) = frame
	{
if let Some( symbol ) = frame.symbols().last()
{
format!
(
"PANIC - fn `{}` calls `{}` @ {}:{}"
, symbol.name() .map( |s| strip( format!( "{}", s ) ) ).unwrap_or_else( || empty.clone() )
, which
, symbol.filename().map( |s| s.to_string_lossy().to_string() ).unwrap_or_else( || empty.clone() )
, symbol.lineno() .map( |s| format!( "{}", s ) ).unwrap_or( empty )
)
} else { empty }
} else { empty }
}
// Will return the function name from a string returned by backtrace:
//
// ekke::main::dkk39ru458u3 -> main
//
fn strip( input: String ) -> String
{
let re = Regex::new( r"([^:]+)::[[:alnum:]]+$" ).unwrap();
re.captures( &input )
.map( |caps|
caps.get(1)
.map_or( String::new(), |m| m.as_str().to_string() )
)
.unwrap_or( input )
}
| 31.122093 | 202 | 0.566785 |
1db4e964996de7f8b3ad5abe8dbc5e42e0a3aa04 | 634 | #[doc = "POWER register accessor: an alias for `Reg<POWER_SPEC>`"]
pub type POWER = crate::Reg<power::POWER_SPEC>;
#[doc = "Description cluster: RAM\\[n\\]
power control register"]
pub mod power;
#[doc = "POWERSET register accessor: an alias for `Reg<POWERSET_SPEC>`"]
pub type POWERSET = crate::Reg<powerset::POWERSET_SPEC>;
#[doc = "Description cluster: RAM\\[n\\]
power control set register"]
pub mod powerset;
#[doc = "POWERCLR register accessor: an alias for `Reg<POWERCLR_SPEC>`"]
pub type POWERCLR = crate::Reg<powerclr::POWERCLR_SPEC>;
#[doc = "Description cluster: RAM\\[n\\]
power control clear register"]
pub mod powerclr;
| 39.625 | 72 | 0.722397 |
ab1186722f0623e6e361824bde0a350d6a120457 | 15,008 | use std::prelude::v1::*;
use std::fmt;
use std::str::Chars;
use std::time::Duration;
use std::error::Error as StdError;
quick_error! {
/// Error parsing human-friendly duration
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Error {
/// Invalid character during parsing
///
/// More specifically anything that is not alphanumeric is prohibited
///
        /// The field is a byte offset of the character in the string.
InvalidCharacter(offset: usize) {
display("invalid character at {}", offset)
description("invalid character")
}
/// Non-numeric value where number is expected
///
/// This usually means that either time unit is broken into words,
/// e.g. `m sec` instead of `msec`, or just number is omitted,
/// for example `2 hours min` instead of `2 hours 1 min`
///
/// The field is an byte offset of the errorneous character
/// in the string.
NumberExpected(offset: usize) {
display("expected number at {}", offset)
description("expected number")
}
/// Unit in the number is not one of allowed units
///
/// See documentation of `parse_duration` for the list of supported
/// time units.
///
/// The two fields are start and end (exclusive) of the slice from
/// the original string, containing errorneous value
UnknownUnit(start: usize, end: usize) {
display("unknown unit at {}-{}", start, end)
description("unknown unit")
}
/// The numeric value is too large
///
        /// Usually this means the value is too large to be useful. If the user
        /// writes data in subsecond units, then the maximum is about 3k years.
        /// When using seconds, or larger units, the limit is even larger.
NumberOverflow {
display(self_) -> ("{}", self_.description())
description("number is too large")
}
        /// The value was an empty string (or consisted only of whitespace)
Empty {
display(self_) -> ("{}", self_.description())
description("value was empty")
}
}
}
/// A wrapper type that allows you to Display a Duration
#[derive(Debug, Clone)]
pub struct FormattedDuration(Duration);
trait OverflowOp: Sized {
fn mul(self, other: Self) -> Result<Self, Error>;
fn add(self, other: Self) -> Result<Self, Error>;
}
impl OverflowOp for u64 {
fn mul(self, other: Self) -> Result<Self, Error> {
self.checked_mul(other).ok_or(Error::NumberOverflow)
}
fn add(self, other: Self) -> Result<Self, Error> {
self.checked_add(other).ok_or(Error::NumberOverflow)
}
}
struct Parser<'a> {
iter: Chars<'a>,
src: &'a str,
current: (u64, u64),
}
impl<'a> Parser<'a> {
fn off(&self) -> usize {
self.src.len() - self.iter.as_str().len()
}
fn parse_first_char(&mut self) -> Result<Option<u64>, Error> {
let off = self.off();
for c in self.iter.by_ref() {
match c {
'0'...'9' => {
return Ok(Some(c as u64 - '0' as u64));
}
c if c.is_whitespace() => continue,
_ => {
return Err(Error::NumberExpected(off));
}
}
}
return Ok(None);
}
fn parse_unit(&mut self, n: u64, start: usize, end: usize)
-> Result<(), Error>
{
let (mut sec, nsec) = match &self.src[start..end] {
"nanos" | "nsec" | "ns" => (0u64, n),
"usec" | "us" => (0u64, try!(n.mul(1000))),
"millis" | "msec" | "ms" => (0u64, try!(n.mul(1000_000))),
"seconds" | "second" | "secs" | "sec" | "s" => (n, 0),
"minutes" | "minute" | "min" | "mins" | "m"
=> (try!(n.mul(60)), 0),
"hours" | "hour" | "hr" | "hrs" | "h" => (try!(n.mul(3600)), 0),
"days" | "day" | "d" => (try!(n.mul(86400)), 0),
"weeks" | "week" | "w" => (try!(n.mul(86400*7)), 0),
"months" | "month" | "M" => (try!(n.mul(2630016)), 0), // 30.44d
"years" | "year" | "y" => (try!(n.mul(31557600)), 0), // 365.25d
_ => return Err(Error::UnknownUnit(start, end)),
};
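        // Fold the parsed span into the running (seconds, nanoseconds) total,
        // carrying whole seconds out of the nanosecond part when it exceeds
        // one billion, so the pair stays normalized.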
let mut nsec = try!(self.current.1.add(nsec));
if nsec > 1000_000_000 {
sec = try!(sec.add(nsec / 1000_000_000));
nsec %= 1000_000_000;
}
sec = try!(self.current.0.add(sec));
self.current = (sec, nsec);
Ok(())
}
fn parse(mut self) -> Result<Duration, Error> {
let mut n = try!(try!(self.parse_first_char()).ok_or(Error::Empty));
'outer: loop {
let mut off = self.off();
while let Some(c) = self.iter.next() {
match c {
'0'...'9' => {
n = try!(n.checked_mul(10)
.and_then(|x| x.checked_add(c as u64 - '0' as u64))
.ok_or(Error::NumberOverflow));
}
c if c.is_whitespace() => {}
'a'...'z' | 'A'...'Z' => {
break;
}
_ => {
return Err(Error::InvalidCharacter(off));
}
}
off = self.off();
}
let start = off;
let mut off = self.off();
while let Some(c) = self.iter.next() {
match c {
'0'...'9' => {
try!(self.parse_unit(n, start, off));
n = c as u64 - '0' as u64;
continue 'outer;
}
c if c.is_whitespace() => break,
'a'...'z' | 'A'...'Z' => {}
_ => {
return Err(Error::InvalidCharacter(off));
}
}
off = self.off();
}
try!(self.parse_unit(n, start, off));
n = match try!(self.parse_first_char()) {
Some(n) => n,
None => return Ok(
Duration::new(self.current.0, self.current.1 as u32)),
};
}
}
}
/// Parse duration object `1hour 12min 5s`
///
/// The duration object is a concatenation of time spans, where each time
/// span is an integer number followed by a suffix. Supported suffixes:
///
/// * `nsec`, `ns` -- nanoseconds
/// * `usec`, `us` -- microseconds
/// * `msec`, `ms` -- milliseconds
/// * `seconds`, `second`, `sec`, `s`
/// * `minutes`, `minute`, `min`, `m`
/// * `hours`, `hour`, `hr`, `h`
/// * `days`, `day`, `d`
/// * `weeks`, `week`, `w`
/// * `months`, `month`, `M` -- defined as 30.44 days
/// * `years`, `year`, `y` -- defined as 365.25 days
///
/// # Examples
///
/// ```
/// use std::time::Duration;
/// use humantime::parse_duration;
///
/// assert_eq!(parse_duration("2h 37min"), Ok(Duration::new(9420, 0)));
/// assert_eq!(parse_duration("32ms"), Ok(Duration::new(0, 32_000_000)));
/// ```
pub fn parse_duration(s: &str) -> Result<Duration, Error> {
Parser {
iter: s.chars(),
src: s,
current: (0, 0),
}.parse()
}
/// Formats duration into a human-readable string
///
/// Note: this format is guaranteed to parse back to the same value with
/// `parse_duration`, but some details of the exact composition of the output
/// may change.
///
/// # Examples
///
/// ```
/// use std::time::Duration;
/// use humantime::format_duration;
///
/// let val1 = Duration::new(9420, 0);
/// assert_eq!(format_duration(val1).to_string(), "2h 37m");
/// let val2 = Duration::new(0, 32_000_000);
/// assert_eq!(format_duration(val2).to_string(), "32ms");
/// ```
pub fn format_duration(val: Duration) -> FormattedDuration {
FormattedDuration(val)
}
fn item_plural(f: &mut fmt::Formatter, started: &mut bool,
name: &str, value: u64)
-> fmt::Result
{
if value > 0 {
if *started {
f.write_str(" ")?;
}
write!(f, "{}{}", value, name)?;
if value > 1 {
f.write_str("s")?;
}
*started = true;
}
Ok(())
}
fn item(f: &mut fmt::Formatter, started: &mut bool, name: &str, value: u32)
-> fmt::Result
{
if value > 0 {
if *started {
f.write_str(" ")?;
}
write!(f, "{}{}", value, name)?;
*started = true;
}
Ok(())
}
impl fmt::Display for FormattedDuration {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let secs = self.0.as_secs();
let nanos = self.0.subsec_nanos();
if secs == 0 && nanos == 0 {
f.write_str("0s")?;
return Ok(());
}
let years = secs / 31557600; // 365.25d
let ydays = secs % 31557600;
let months = ydays / 2630016; // 30.44d
let mdays = ydays % 2630016;
let days = mdays / 86400;
let day_secs = mdays % 86400;
let hours = day_secs / 3600;
let minutes = day_secs % 3600 / 60;
let seconds = day_secs % 60;
let millis = nanos / 1_000_000;
let micros = nanos / 1000 % 1000;
let nanosec = nanos % 1000;
let ref mut started = false;
item_plural(f, started, "year", years)?;
item_plural(f, started, "month", months)?;
item_plural(f, started, "day", days)?;
item(f, started, "h", hours as u32)?;
item(f, started, "m", minutes as u32)?;
item(f, started, "s", seconds as u32)?;
item(f, started, "ms", millis)?;
item(f, started, "us", micros)?;
item(f, started, "ns", nanosec)?;
Ok(())
}
}
#[cfg(test)]
mod test {
extern crate rand;
use std::time::Duration;
use self::rand::Rng;
use super::{parse_duration, format_duration};
use super::Error;
#[test]
fn test_units() {
assert_eq!(parse_duration("17nsec"), Ok(Duration::new(0, 17)));
assert_eq!(parse_duration("17nanos"), Ok(Duration::new(0, 17)));
assert_eq!(parse_duration("33ns"), Ok(Duration::new(0, 33)));
assert_eq!(parse_duration("3usec"), Ok(Duration::new(0, 3000)));
assert_eq!(parse_duration("78us"), Ok(Duration::new(0, 78000)));
assert_eq!(parse_duration("31msec"), Ok(Duration::new(0, 31000000)));
assert_eq!(parse_duration("31millis"), Ok(Duration::new(0, 31000000)));
assert_eq!(parse_duration("6ms"), Ok(Duration::new(0, 6000000)));
assert_eq!(parse_duration("3000s"), Ok(Duration::new(3000, 0)));
assert_eq!(parse_duration("300sec"), Ok(Duration::new(300, 0)));
assert_eq!(parse_duration("300secs"), Ok(Duration::new(300, 0)));
assert_eq!(parse_duration("50seconds"), Ok(Duration::new(50, 0)));
assert_eq!(parse_duration("1second"), Ok(Duration::new(1, 0)));
assert_eq!(parse_duration("100m"), Ok(Duration::new(6000, 0)));
assert_eq!(parse_duration("12min"), Ok(Duration::new(720, 0)));
assert_eq!(parse_duration("12mins"), Ok(Duration::new(720, 0)));
assert_eq!(parse_duration("1minute"), Ok(Duration::new(60, 0)));
assert_eq!(parse_duration("7minutes"), Ok(Duration::new(420, 0)));
assert_eq!(parse_duration("2h"), Ok(Duration::new(7200, 0)));
assert_eq!(parse_duration("7hr"), Ok(Duration::new(25200, 0)));
assert_eq!(parse_duration("7hrs"), Ok(Duration::new(25200, 0)));
assert_eq!(parse_duration("1hour"), Ok(Duration::new(3600, 0)));
assert_eq!(parse_duration("24hours"), Ok(Duration::new(86400, 0)));
assert_eq!(parse_duration("1day"), Ok(Duration::new(86400, 0)));
assert_eq!(parse_duration("2days"), Ok(Duration::new(172800, 0)));
assert_eq!(parse_duration("365d"), Ok(Duration::new(31536000, 0)));
assert_eq!(parse_duration("1week"), Ok(Duration::new(604800, 0)));
assert_eq!(parse_duration("7weeks"), Ok(Duration::new(4233600, 0)));
assert_eq!(parse_duration("52w"), Ok(Duration::new(31449600, 0)));
assert_eq!(parse_duration("1month"), Ok(Duration::new(2630016, 0)));
assert_eq!(parse_duration("3months"), Ok(Duration::new(3*2630016, 0)));
assert_eq!(parse_duration("12M"), Ok(Duration::new(31560192, 0)));
assert_eq!(parse_duration("1year"), Ok(Duration::new(31557600, 0)));
assert_eq!(parse_duration("7years"), Ok(Duration::new(7*31557600, 0)));
assert_eq!(parse_duration("17y"), Ok(Duration::new(536479200, 0)));
}
#[test]
fn test_combo() {
assert_eq!(parse_duration("20 min 17 nsec "), Ok(Duration::new(1200, 17)));
assert_eq!(parse_duration("2h 15m"), Ok(Duration::new(8100, 0)));
}
#[test]
fn all_86400_seconds() {
for second in 0..86400 { // scan leap year and non-leap year
let d = Duration::new(second, 0);
assert_eq!(d,
parse_duration(&format_duration(d).to_string()).unwrap());
}
}
#[test]
fn random_second() {
for _ in 0..10000 {
let sec = rand::thread_rng().gen_range(0, 253370764800);
let d = Duration::new(sec, 0);
assert_eq!(d,
parse_duration(&format_duration(d).to_string()).unwrap());
}
}
#[test]
fn random_any() {
for _ in 0..10000 {
let sec = rand::thread_rng().gen_range(0, 253370764800);
let nanos = rand::thread_rng().gen_range(0, 1_000_000_000);
let d = Duration::new(sec, nanos);
assert_eq!(d,
parse_duration(&format_duration(d).to_string()).unwrap());
}
}
#[test]
    fn test_overflow() {
// Overflow on subseconds is earlier because of how we do conversion
// we could fix it, but I don't see any good reason for this
assert_eq!(parse_duration("100000000000000000000ns"),
Err(Error::NumberOverflow));
assert_eq!(parse_duration("100000000000000000us"),
Err(Error::NumberOverflow));
assert_eq!(parse_duration("100000000000000ms"),
Err(Error::NumberOverflow));
assert_eq!(parse_duration("100000000000000000000s"),
Err(Error::NumberOverflow));
assert_eq!(parse_duration("10000000000000000000m"),
Err(Error::NumberOverflow));
assert_eq!(parse_duration("1000000000000000000h"),
Err(Error::NumberOverflow));
assert_eq!(parse_duration("100000000000000000d"),
Err(Error::NumberOverflow));
assert_eq!(parse_duration("10000000000000000w"),
Err(Error::NumberOverflow));
assert_eq!(parse_duration("1000000000000000M"),
Err(Error::NumberOverflow));
assert_eq!(parse_duration("10000000000000y"),
Err(Error::NumberOverflow));
}
}
| 36.338983 | 83 | 0.534382 |
e4174b47b99e2ef56d5474f0dfdc5cbb6eff1935 | 681 | use cell_project::cell_project as project;
use std::cell::Cell;
mod a {
pub struct Foo {
pub name: String,
pub build: i32,
}
pub struct Bar<T> {
pub name: String,
pub build: T,
}
pub struct Quax<T, U> {
pub name: String,
pub build: T,
pub value: U,
}
}
fn generic<T>(bar: &mut a::Bar<T>) -> &Cell<String> {
let bar = Cell::from_mut(bar);
project!(a::Bar<_>, bar.name)
}
fn super_generic<T, U>(quax: &mut a::Quax<T, a::Quax<a::Bar<a::Foo>, a::Bar<U>>>) -> &Cell<String> {
let quax = Cell::from_mut(quax);
project!(a::Quax<_, a::Quax<a::Bar<a::Foo>, _>>, quax.name)
}
fn main() {}
| 20.636364 | 100 | 0.534508 |
4bcb13525d52aeaf7b4496d3d84b9fb1eb808f0d | 21,945 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[derive(std::fmt::Debug)]
pub(crate) struct Handle<C = aws_hyper::DynConnector> {
client: aws_hyper::Client<C>,
conf: crate::Config,
}
#[derive(Clone, std::fmt::Debug)]
pub struct Client<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<Handle<C>>,
}
impl<C> Client<C> {
pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
let client = aws_hyper::Client::new(conn);
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
pub fn conf(&self) -> &crate::Config {
&self.handle.conf
}
}
impl Client {
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn from_env() -> Self {
Self::from_conf(crate::Config::builder().build())
}
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn from_conf(conf: crate::Config) -> Self {
let client = aws_hyper::Client::https();
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
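// Illustrative usage sketch (hypothetical names/values; assumes a TLS feature
// such as "rustls" is enabled and this runs inside an async context):
//
//     let client = Client::from_env();
//     let result = client
//         .create_event_integration()
//         .name("my-integration")
//         .event_bridge_bus("default")
//         .send()
//         .await;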
impl<C> Client<C>
where
C: aws_hyper::SmithyConnector,
{
pub fn create_event_integration(&self) -> fluent_builders::CreateEventIntegration<C> {
fluent_builders::CreateEventIntegration::new(self.handle.clone())
}
pub fn delete_event_integration(&self) -> fluent_builders::DeleteEventIntegration<C> {
fluent_builders::DeleteEventIntegration::new(self.handle.clone())
}
pub fn get_event_integration(&self) -> fluent_builders::GetEventIntegration<C> {
fluent_builders::GetEventIntegration::new(self.handle.clone())
}
pub fn list_event_integration_associations(
&self,
) -> fluent_builders::ListEventIntegrationAssociations<C> {
fluent_builders::ListEventIntegrationAssociations::new(self.handle.clone())
}
pub fn list_event_integrations(&self) -> fluent_builders::ListEventIntegrations<C> {
fluent_builders::ListEventIntegrations::new(self.handle.clone())
}
pub fn list_tags_for_resource(&self) -> fluent_builders::ListTagsForResource<C> {
fluent_builders::ListTagsForResource::new(self.handle.clone())
}
pub fn tag_resource(&self) -> fluent_builders::TagResource<C> {
fluent_builders::TagResource::new(self.handle.clone())
}
pub fn untag_resource(&self) -> fluent_builders::UntagResource<C> {
fluent_builders::UntagResource::new(self.handle.clone())
}
pub fn update_event_integration(&self) -> fluent_builders::UpdateEventIntegration<C> {
fluent_builders::UpdateEventIntegration::new(self.handle.clone())
}
}
pub mod fluent_builders {
#[derive(std::fmt::Debug)]
pub struct CreateEventIntegration<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::create_event_integration_input::Builder,
}
impl<C> CreateEventIntegration<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateEventIntegrationOutput,
smithy_http::result::SdkError<crate::error::CreateEventIntegrationError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the event integration.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(input);
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
/// <p>The description of the event integration.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input);
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>The event filter.</p>
pub fn event_filter(mut self, input: crate::model::EventFilter) -> Self {
self.inner = self.inner.event_filter(input);
self
}
pub fn set_event_filter(
mut self,
input: std::option::Option<crate::model::EventFilter>,
) -> Self {
self.inner = self.inner.set_event_filter(input);
self
}
/// <p>The EventBridge bus.</p>
pub fn event_bridge_bus(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.event_bridge_bus(input);
self
}
pub fn set_event_bridge_bus(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_event_bridge_bus(input);
self
}
/// <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the
/// request.</p>
pub fn client_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.client_token(input);
self
}
pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_client_token(input);
self
}
/// <p>One or more tags.</p>
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.tags(k, v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct DeleteEventIntegration<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::delete_event_integration_input::Builder,
}
impl<C> DeleteEventIntegration<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteEventIntegrationOutput,
smithy_http::result::SdkError<crate::error::DeleteEventIntegrationError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the event integration.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(input);
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetEventIntegration<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_event_integration_input::Builder,
}
impl<C> GetEventIntegration<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetEventIntegrationOutput,
smithy_http::result::SdkError<crate::error::GetEventIntegrationError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the event integration. </p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(input);
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListEventIntegrationAssociations<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_event_integration_associations_input::Builder,
}
impl<C> ListEventIntegrationAssociations<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListEventIntegrationAssociationsOutput,
smithy_http::result::SdkError<crate::error::ListEventIntegrationAssociationsError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the event integration. </p>
pub fn event_integration_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.event_integration_name(input);
self
}
pub fn set_event_integration_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_event_integration_name(input);
self
}
/// <p>The token for the next set of results. Use the value returned in the previous
/// response in the next request to retrieve the next set of results.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input);
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
/// <p>The maximum number of results to return per page.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListEventIntegrations<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_event_integrations_input::Builder,
}
impl<C> ListEventIntegrations<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListEventIntegrationsOutput,
smithy_http::result::SdkError<crate::error::ListEventIntegrationsError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The token for the next set of results. Use the value returned in the previous
/// response in the next request to retrieve the next set of results.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input);
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
/// <p>The maximum number of results to return per page.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListTagsForResource<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_tags_for_resource_input::Builder,
}
impl<C> ListTagsForResource<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListTagsForResourceOutput,
smithy_http::result::SdkError<crate::error::ListTagsForResourceError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) of the resource. </p>
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(input);
self
}
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct TagResource<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::tag_resource_input::Builder,
}
impl<C> TagResource<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::TagResourceOutput,
smithy_http::result::SdkError<crate::error::TagResourceError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) of the resource.</p>
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(input);
self
}
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
/// <p>One or more tags. </p>
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.tags(k, v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct UntagResource<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::untag_resource_input::Builder,
}
impl<C> UntagResource<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::UntagResourceOutput,
smithy_http::result::SdkError<crate::error::UntagResourceError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) of the resource.</p>
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(input);
self
}
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
/// <p>The tag keys.</p>
pub fn tag_keys(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.tag_keys(inp);
self
}
pub fn set_tag_keys(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_tag_keys(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct UpdateEventIntegration<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::update_event_integration_input::Builder,
}
impl<C> UpdateEventIntegration<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateEventIntegrationOutput,
smithy_http::result::SdkError<crate::error::UpdateEventIntegrationError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the event integration.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(input);
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
        /// <p>The description of the event integration.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input);
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
}
}
| 38.165217 | 100 | 0.551652 |
8f767bc3154e479e72826f7563c2c7d9878523cb | 40,713 | use std::time::{Duration, Instant};
use metrix::{
processor::ProcessorMount, AggregatesProcessors, Decrement, DecrementBy, Increment,
IncrementBy, TelemetryTransmitter, TimeUnit, TransmitsTelemetryData,
};
use serde::{Deserialize, Serialize};
use crate::components::{
committer::CommitError,
connector::ConnectError,
streams::{EventStreamBatchStats, EventStreamError, EventStreamErrorKind},
};
use crate::internals::background_committer::CommitTrigger;
use crate::nakadi_types::subscription::StreamParameters;
use super::Instruments;
/// Instrumentation with Metrix
#[derive(Clone)]
pub struct Metrix {
tx: TelemetryTransmitter<Metric>,
}
impl Metrix {
/// Initializes the metrics.
///
/// Adds them directly into the given `processor` without creating an additional group.
pub fn new<A: AggregatesProcessors>(config: &MetrixConfig, processor: &mut A) -> Self {
let (tx, global_proc) = instr::create(&config);
processor.add_processor(global_proc);
Metrix { tx }
}
    /// Creates new Metrix instrumentation and returns it together with a mount
    /// that can be plugged into metrix. The instrumentation itself can be
    /// plugged into the `Consumer`.
pub fn new_mountable(config: &MetrixConfig, name: Option<&str>) -> (Metrix, ProcessorMount) {
let mut mount = if let Some(name) = name {
ProcessorMount::new(name)
} else {
ProcessorMount::default()
};
let me = Self::new(config, &mut mount);
(me, mount)
}
}
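// Sketch: wiring `Metrix` into an application. Whether the mount is added to a
// metrix `TelemetryDriver` or some other processor is an assumption about the
// embedding application; the consumer side only needs the returned `Metrix`,
// which implements `Instruments`.
//
// let config = MetrixConfig::default();
// let (instrumentation, mount) = Metrix::new_mountable(&config, Some("consumer"));
// // Add `mount` to the application's metrix processor tree and pass
// // `instrumentation` to the nakadion `Consumer` builder.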
new_type! {
#[doc="The time a gauge will track values.\n\nDefault is 60s.\n"]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub secs struct MetrixGaugeTrackingSecs(u32, env="METRIX_GAUGE_TRACKING_SECS");
}
impl Default for MetrixGaugeTrackingSecs {
fn default() -> Self {
60.into()
}
}
new_type! {
#[doc="The time an alert will stay on after trigger.\n\nDefault is 61s.\n"]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub secs struct AlertDurationSecs(u32, env="METRIX_ALERT_DURATION_SECS");
}
impl Default for AlertDurationSecs {
fn default() -> Self {
61.into()
}
}
new_type! {
#[doc="The time after which a histogram is reset.\n\nDefault is 60s.\n"]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub secs struct HistogramInactivityResetSecs(u32, env="METRIX_HISTOGRAM_INACTIVITY_RESET_SECS");
}
impl Default for HistogramInactivityResetSecs {
fn default() -> Self {
60.into()
}
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
#[non_exhaustive]
pub struct MetrixConfig {
    /// Tracks gauge values over the given number of seconds
pub gauge_tracking_secs: Option<MetrixGaugeTrackingSecs>,
    /// Keeps an alert switched on for the given number of seconds after it triggers
pub alert_duration_secs: Option<AlertDurationSecs>,
    /// Resets a histogram after the given number of seconds of inactivity
pub histogram_inactivity_reset_secs: Option<HistogramInactivityResetSecs>,
}
impl MetrixConfig {
env_ctors!();
fn fill_from_env_prefixed_internal<T: AsRef<str>>(
&mut self,
prefix: T,
) -> Result<(), crate::Error> {
if self.gauge_tracking_secs.is_none() {
self.gauge_tracking_secs =
MetrixGaugeTrackingSecs::try_from_env_prefixed(prefix.as_ref())?;
}
if self.alert_duration_secs.is_none() {
self.alert_duration_secs = AlertDurationSecs::try_from_env_prefixed(prefix.as_ref())?;
}
if self.histogram_inactivity_reset_secs.is_none() {
self.histogram_inactivity_reset_secs =
HistogramInactivityResetSecs::try_from_env_prefixed(prefix.as_ref())?;
}
Ok(())
}
}
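// Sketch: setting the tracking windows explicitly instead of via environment
// variables. The `new_type!` wrappers above convert from plain integer
// literals via `.into()`, as the `Default` impls show (values here are
// illustrative).
//
// let mut config = MetrixConfig::default();
// config.gauge_tracking_secs = Some(30.into());
// config.alert_duration_secs = Some(90.into());
// config.histogram_inactivity_reset_secs = Some(120.into());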
impl Instruments for Metrix {
fn consumer_started(&self) {
self.tx.observed_one_now(Metric::ConsumerStarted);
}
fn consumer_stopped(&self, ran_for: Duration) {
self.tx.observed_one_value_now(
Metric::ConsumerStoppedWithTime,
(ran_for, TimeUnit::Milliseconds),
);
}
fn streaming_ended(&self, streamed_for: Duration) {
self.tx.observed_one_value_now(
Metric::StreamingEndedWithTime,
(streamed_for, TimeUnit::Milliseconds),
);
}
fn stream_connect_attempt_success(&self, time: Duration) {
self.tx.observed_one_value_now(
Metric::StreamConnectAttemptSuccessTime,
(time, TimeUnit::Milliseconds),
);
}
fn stream_connect_attempt_failed(&self, time: Duration) {
self.tx.observed_one_value_now(
Metric::StreamConnectAttemptFailedTime,
(time, TimeUnit::Milliseconds),
);
}
fn stream_connected(&self, time: Duration) {
self.tx
.observed_one_value_now(Metric::StreamConnectedTime, (time, TimeUnit::Milliseconds));
}
fn stream_not_connected(&self, time: Duration, _err: &ConnectError) {
self.tx.observed_one_value_now(
Metric::StreamNotConnectedTime,
(time, TimeUnit::Milliseconds),
);
}
fn stream_chunk_received(&self, n_bytes: usize) {
self.tx
.observed_one_value_now(Metric::StreamChunkReceivedBytes, n_bytes);
}
fn stream_frame_completed(&self, n_bytes: usize, time: Duration) {
self.tx
.observed_one_value_now(Metric::StreamFrameCompletedBytes, n_bytes)
.observed_one_value_now(
Metric::StreamFrameCompletedTime,
(time, TimeUnit::Microseconds),
);
}
fn stream_tick_emitted(&self) {
self.tx.observed_one_now(Metric::StreamTickEmitted);
}
fn info_frame_received(&self, _frame_started_at: Instant, frame_completed_at: Instant) {
self.tx.observed_one_value_now(
Metric::StreamInfoFrameReceivedLag,
(frame_completed_at.elapsed(), TimeUnit::Microseconds),
);
}
fn keep_alive_frame_received(&self, _frame_started_at: Instant, frame_completed_at: Instant) {
self.tx.observed_one_value_now(
Metric::StreamKeepAliveFrameReceivedLag,
(frame_completed_at.elapsed(), TimeUnit::Microseconds),
);
}
fn batch_frame_received(
&self,
_frame_started_at: Instant,
frame_completed_at: Instant,
events_bytes: usize,
) {
self.tx
.observed_one_value_now(Metric::StreamBatchFrameReceivedBytes, events_bytes);
self.tx.observed_one_value_now(
Metric::StreamBatchFrameReceivedLag,
(frame_completed_at.elapsed(), TimeUnit::Microseconds),
);
}
fn batch_frame_gap(&self, gap: Duration) {
self.tx
.observed_one_value_now(Metric::StreamBatchFrameGap, (gap, TimeUnit::Microseconds));
}
fn no_frames_warning(&self, no_frames_for: Duration) {
self.tx.observed_one_value_now(
Metric::NoFramesForWarning,
(no_frames_for, TimeUnit::Milliseconds),
);
}
fn no_events_warning(&self, no_events_for: Duration) {
self.tx.observed_one_value_now(
Metric::NoEventsForWarning,
(no_events_for, TimeUnit::Milliseconds),
);
}
fn stream_dead(&self, after: Duration) {
self.tx
.observed_one_value_now(Metric::StreamDeadAfter, (after, TimeUnit::Milliseconds));
}
fn stream_error(&self, err: &EventStreamError) {
match err.kind() {
EventStreamErrorKind::Io => {
self.tx.observed_one_now(Metric::StreamErrorIo);
}
EventStreamErrorKind::Parser => {
self.tx.observed_one_now(Metric::StreamErrorParse);
}
}
}
fn stream_unconsumed_events(&self, n_unconsumed: usize) {
self.tx
.observed_one_value_now(Metric::StreamUnconsumedEvents, n_unconsumed);
}
fn batches_in_flight_incoming(&self, stats: &EventStreamBatchStats) {
self.tx
.observed_one_value_now(Metric::BatchesInFlightChanged, Increment)
.observed_one_value_now(
Metric::EventsInFlightChanged,
IncrementBy(stats.n_events as u32),
)
.observed_one_value_now(
Metric::BytesInFlightChanged,
IncrementBy(stats.n_bytes as u32),
)
.observed_one_value_now(Metric::UncommittedBatchesChanged, Increment)
.observed_one_value_now(
Metric::UncommittedEventsChanged,
IncrementBy(stats.n_events as u32),
);
}
fn batches_in_flight_processed(&self, stats: &EventStreamBatchStats) {
self.tx
.observed_one_value_now(Metric::BatchesInFlightChanged, Decrement)
.observed_one_value_now(
Metric::EventsInFlightChanged,
DecrementBy(stats.n_events as u32),
)
.observed_one_value_now(
Metric::BytesInFlightChanged,
DecrementBy(stats.n_bytes as u32),
);
}
fn in_flight_stats_reset(&self) {
self.tx
.observed_one_value_now(Metric::BatchesInFlightChanged, 0)
.observed_one_value_now(Metric::EventsInFlightChanged, 0)
.observed_one_value_now(Metric::BytesInFlightChanged, 0)
.observed_one_value_now(Metric::UncommittedBatchesChanged, 0)
.observed_one_value_now(Metric::UncommittedEventsChanged, 0);
}
fn event_type_partition_activated(&self) {
self.tx
.observed_one_value_now(Metric::EventTypePartitionActivated, Increment);
}
fn event_type_partition_deactivated(&self, active_for: Duration) {
self.tx.observed_one_value_now(
Metric::EventTypePartitionDeactivatedAfter,
(active_for, TimeUnit::Milliseconds),
);
}
fn batch_processing_started(&self, _frame_started_at: Instant, frame_completed_at: Instant) {
self.tx.observed_one_value_now(
Metric::BatchProcessingStartedLag,
(frame_completed_at.elapsed(), TimeUnit::Microseconds),
);
}
fn batch_processed(&self, n_bytes: usize, time: Duration) {
self.tx
.observed_one_value_now(Metric::BatchProcessedTime, (time, TimeUnit::Microseconds))
.observed_one_value_now(Metric::BatchProcessedBytes, n_bytes);
}
fn batch_processed_n_events(&self, n_events: usize) {
self.tx
.observed_one_value_now(Metric::BatchProcessedNEvents, n_events);
}
fn batch_deserialized(&self, n_bytes: usize, time: Duration) {
self.tx
.observed_one_value_now(Metric::BatchDeserializationBytes, n_bytes)
.observed_one_value_now(
Metric::BatchDeserializationTime,
(time, TimeUnit::Microseconds),
);
}
fn cursor_to_commit_received(&self, _frame_started_at: Instant, frame_completed_at: Instant) {
self.tx.observed_one_value_now(
Metric::CommitterCursorsReceivedLag,
(frame_completed_at.elapsed(), TimeUnit::Milliseconds),
);
}
fn cursors_commit_triggered(&self, trigger: CommitTrigger) {
match trigger {
CommitTrigger::Deadline {
n_batches,
n_events,
} => {
self.tx
.observed_one_value_now(Metric::CommitterTriggerDeadlineBatchesCount, n_batches)
.observed_one_value_now(Metric::CommitterTriggerDeadlineEventsCount, n_events);
}
CommitTrigger::Events {
n_batches,
n_events,
} => {
self.tx
.observed_one_value_now(Metric::CommitterTriggerEventsBatchesCount, n_batches)
.observed_one_value_now(Metric::CommitterTriggerEventsEventsCount, n_events);
}
CommitTrigger::Batches {
n_batches,
n_events,
} => {
self.tx
.observed_one_value_now(Metric::CommitterTriggerBatchesBatchesCount, n_batches)
.observed_one_value_now(Metric::CommitterTriggerBatchesEventsCount, n_events);
}
}
}
fn cursor_ages_on_commit_attempt(
&self,
first_cursor_age: Duration,
last_cursor_age: Duration,
first_cursor_age_warning: bool,
) {
self.tx
.observed_one_value_now(
Metric::CommitterFirstCursorAgeOnCommitAttempt,
(first_cursor_age, TimeUnit::Milliseconds),
)
.observed_one_value_now(
Metric::CommitterLastCursorAgeOnCommitAttempt,
(last_cursor_age, TimeUnit::Milliseconds),
);
if first_cursor_age_warning {
self.tx
.observed_one_now(Metric::CommitterFirstCursorAgeOnCommitAttemptAgeWarning);
}
}
fn cursors_committed(&self, n_cursors: usize, time: Duration) {
self.tx
.observed_one_value_now(Metric::CommitterCursorsCommittedCount, n_cursors)
.observed_one_value_now(
Metric::CommitterCursorsCommittedTime,
(time, TimeUnit::Milliseconds),
);
}
fn batches_committed(&self, n_batches: usize, n_events: usize) {
self.tx
.observed_one_value_now(
Metric::UncommittedBatchesChanged,
DecrementBy(n_batches as u32),
)
.observed_one_value_now(
Metric::UncommittedEventsChanged,
DecrementBy(n_events as u32),
);
}
fn cursors_not_committed(&self, n_cursors: usize, time: Duration, _err: &CommitError) {
self.tx.observed_one_now(Metric::CommitterCommitFailed);
self.tx
.observed_one_value_now(Metric::CommitterCursorsNotCommittedCount, n_cursors)
.observed_one_value_now(
Metric::CommitterCursorsNotCommittedTime,
(time, TimeUnit::Milliseconds),
);
}
fn commit_cursors_attempt_failed(&self, n_cursors: usize, time: Duration) {
self.tx
.observed_one_value_now(Metric::CommitterAttemptFailedCount, n_cursors)
.observed_one_value_now(
Metric::CommitterAttemptFailedTime,
(time, TimeUnit::Milliseconds),
);
}
fn stream_parameters(&self, params: &StreamParameters) {
self.tx
.observed_one_value_now(
Metric::StreamParametersMaxUncommittedEvents,
params
.max_uncommitted_events
.unwrap_or_default()
.into_inner(),
)
.observed_one_value_now(
Metric::StreamParametersBatchLimit,
params.batch_limit.unwrap_or_default().into_inner(),
)
.observed_one_value_now(
Metric::StreamParametersStreamTimeoutSecs,
params.stream_timeout_secs.unwrap_or_default().into_inner(),
)
.observed_one_value_now(
Metric::StreamParametersCommitTimeoutSecs,
params.commit_timeout_secs.unwrap_or_default().into_inner(),
)
.observed_one_value_now(
Metric::StreamParametersBatchFlushTimeoutSecs,
params
.batch_flush_timeout_secs
.unwrap_or_default()
.into_inner(),
);
}
}
#[derive(Clone, Copy, Eq, PartialEq)]
pub enum Metric {
ConsumerStarted,
ConsumerStoppedWithTime,
StreamingEndedWithTime,
StreamConnectedTime,
StreamNotConnectedTime,
StreamConnectAttemptSuccessTime,
StreamConnectAttemptFailedTime,
StreamChunkReceivedBytes,
StreamFrameCompletedBytes,
StreamFrameCompletedTime,
StreamTickEmitted,
StreamInfoFrameReceivedLag,
StreamKeepAliveFrameReceivedLag,
StreamBatchFrameReceivedBytes,
StreamBatchFrameReceivedLag,
StreamBatchFrameGap,
StreamDeadAfter,
StreamUnconsumedEvents,
StreamParametersMaxUncommittedEvents,
StreamParametersBatchLimit,
StreamParametersStreamTimeoutSecs,
StreamParametersCommitTimeoutSecs,
StreamParametersBatchFlushTimeoutSecs,
NoFramesForWarning,
NoEventsForWarning,
StreamErrorIo,
StreamErrorParse,
BatchesInFlightChanged,
EventsInFlightChanged,
BytesInFlightChanged,
UncommittedBatchesChanged,
UncommittedEventsChanged,
EventTypePartitionActivated,
EventTypePartitionDeactivatedAfter,
BatchProcessingStartedLag,
BatchProcessedBytes,
BatchProcessedTime,
BatchDeserializationBytes,
BatchDeserializationTime,
BatchProcessedNEvents,
CommitterCursorsReceivedLag,
CommitterCursorsCommittedTime,
CommitterCursorsCommittedCount,
CommitterCommitFailed,
CommitterCursorsNotCommittedTime,
CommitterCursorsNotCommittedCount,
CommitterAttemptFailedTime,
CommitterAttemptFailedCount,
CommitterTriggerDeadlineBatchesCount,
CommitterTriggerEventsBatchesCount,
CommitterTriggerBatchesBatchesCount,
CommitterTriggerDeadlineEventsCount,
CommitterTriggerEventsEventsCount,
CommitterTriggerBatchesEventsCount,
CommitterFirstCursorAgeOnCommitAttempt,
CommitterFirstCursorAgeOnCommitAttemptAgeWarning,
CommitterLastCursorAgeOnCommitAttempt,
}
mod instr {
use metrix::instruments::*;
use metrix::processor::TelemetryProcessor;
use metrix::TelemetryTransmitter;
use metrix::TimeUnit;
use super::{Metric, MetrixConfig};
pub fn create(
config: &MetrixConfig,
) -> (TelemetryTransmitter<Metric>, TelemetryProcessor<Metric>) {
let (tx, mut rx) = TelemetryProcessor::new_pair_without_name();
let mut cockpit = Cockpit::without_name();
create_notifications(&mut cockpit, config);
create_stream_metrics(&mut cockpit, config);
create_lag_metrics(&mut cockpit, config);
create_batch_metrics(&mut cockpit, config);
create_events_metrics(&mut cockpit, config);
create_committer_metrics(&mut cockpit, config);
create_event_type_partition_metrics(&mut cockpit, config);
rx.add_cockpit(cockpit);
(tx, rx)
}
fn create_notifications(cockpit: &mut Cockpit<Metric>, config: &MetrixConfig) {
use Metric::*;
let panel = Panel::named(AcceptAllLabels, "notifications")
.handler(
create_staircase_timer("consumer_started", config)
.for_label(Metric::ConsumerStarted),
)
.handler(
create_staircase_timer("consumer_stopped", config)
.for_label(Metric::ConsumerStoppedWithTime),
)
.handler(
create_staircase_timer("streaming_ended", config)
.for_label(Metric::StreamingEndedWithTime),
)
.handler(create_staircase_timer("stream_io_error", config).for_label(StreamErrorIo))
.handler(
create_staircase_timer("stream_parse_error", config).for_label(StreamErrorParse),
)
.handler(create_staircase_timer("no_events", config).for_label(NoEventsForWarning))
.handler(create_staircase_timer("no_frames", config).for_label(NoFramesForWarning))
.handler(
create_staircase_timer("commit_cursor_age_warning", config)
.for_label(CommitterFirstCursorAgeOnCommitAttemptAgeWarning),
)
.handler(
create_staircase_timer("commit_failed", config).for_label(CommitterCommitFailed),
)
.handler(create_staircase_timer("stream_dead", config).for_label(StreamDeadAfter))
.handler(
create_staircase_timer("connect_attempt_failed", config)
.for_label(StreamConnectAttemptFailedTime),
);
cockpit.add_panel(panel);
}
fn create_stream_metrics(cockpit: &mut Cockpit<Metric>, config: &MetrixConfig) {
let panel = Panel::named(AcceptAllLabels, "stream")
.panel(create_connector_metrics(config))
.panel(Panel::named(AcceptAllLabels, "ticks").meter(
Meter::new_with_defaults("emitted_per_second").for_label(Metric::StreamTickEmitted),
))
.panel(
Panel::named(AcceptAllLabels, "chunks")
.meter(
Meter::new_with_defaults("per_second")
.for_label(Metric::StreamChunkReceivedBytes),
)
.handler(
ValueMeter::new_with_defaults("bytes_per_second")
.for_label(Metric::StreamChunkReceivedBytes),
)
.histogram(
create_histogram("size_distribution", config)
.accept(Metric::StreamChunkReceivedBytes),
),
)
.panel(
Panel::named(AcceptAllLabels, "frames")
.gauge(
create_gauge("in_flight_bytes", config)
.for_label(Metric::BytesInFlightChanged),
)
.meter(
Meter::new_with_defaults("per_second")
.for_label(Metric::StreamFrameCompletedBytes),
)
.handler(
ValueMeter::new_with_defaults("bytes_per_second")
.for_label(Metric::StreamFrameCompletedBytes),
)
.histogram(
create_histogram("size_distribution", config)
.accept(Metric::StreamFrameCompletedBytes),
)
.histogram(
create_histogram("completion_time_us", config)
.display_time_unit(TimeUnit::Microseconds)
.accept(Metric::StreamFrameCompletedTime),
)
.meter(
Meter::new_with_defaults("info_frames_per_second")
.for_label(Metric::StreamInfoFrameReceivedLag),
)
.meter(
Meter::new_with_defaults("keep_alive_frames_per_second")
.for_label(Metric::StreamKeepAliveFrameReceivedLag),
)
.meter(
Meter::new_with_defaults("batch_frames_per_second")
.for_label(Metric::StreamBatchFrameReceivedLag),
)
.histogram(
create_histogram("batch_frame_gap_us", config)
.display_time_unit(TimeUnit::Microseconds)
.accept(Metric::StreamBatchFrameGap),
),
)
.panel(
Panel::named(AcceptAllLabels, "unconsumed_events").gauge(
create_gauge("stream", config).for_label(Metric::StreamUnconsumedEvents),
),
)
.panel(
Panel::named(
(
Metric::StreamParametersMaxUncommittedEvents,
Metric::StreamParametersBatchLimit,
Metric::StreamParametersStreamTimeoutSecs,
Metric::StreamParametersCommitTimeoutSecs,
Metric::StreamParametersBatchFlushTimeoutSecs,
),
"parameters",
)
.gauge(
Gauge::new_with_defaults("max_uncommitted_events")
.for_label(Metric::StreamParametersMaxUncommittedEvents),
)
.gauge(
Gauge::new_with_defaults("batch_limit")
.for_label(Metric::StreamParametersBatchLimit),
)
.gauge(
Gauge::new_with_defaults("stream_timeout_secs")
.for_label(Metric::StreamParametersStreamTimeoutSecs),
)
.gauge(
Gauge::new_with_defaults("commit_timeout_secs")
.for_label(Metric::StreamParametersCommitTimeoutSecs),
)
.gauge(
Gauge::new_with_defaults("batch_flush_timeout_secs")
.for_label(Metric::StreamParametersBatchFlushTimeoutSecs),
),
);
cockpit.add_panel(panel);
}
fn create_connector_metrics(config: &MetrixConfig) -> Panel<Metric> {
Panel::named(AcceptAllLabels, "connector")
.panel(
Panel::named(AcceptAllLabels, "attempts")
.meter(
Meter::new_with_defaults("success_per_second")
.for_label(Metric::StreamConnectAttemptSuccessTime),
)
.histogram(
Histogram::new_with_defaults("success_time_ms")
.display_time_unit(TimeUnit::Milliseconds)
.accept(Metric::StreamConnectAttemptSuccessTime),
)
.meter(
Meter::new_with_defaults("failed_per_second")
.for_label(Metric::StreamConnectAttemptFailedTime),
)
.histogram(
create_histogram("failed_time_ms", config)
.display_time_unit(TimeUnit::Milliseconds)
.accept(Metric::StreamConnectAttemptFailedTime),
),
)
.panel(
Panel::named(AcceptAllLabels, "connected")
.meter(
Meter::new_with_defaults("success_per_second")
.for_label(Metric::StreamConnectedTime),
)
.histogram(
Histogram::new_with_defaults("success_time_ms")
.display_time_unit(TimeUnit::Milliseconds)
.accept(Metric::StreamConnectedTime),
),
)
.panel(
Panel::named(AcceptAllLabels, "not_connected")
.meter(
Meter::new_with_defaults("success_per_second")
.for_label(Metric::StreamNotConnectedTime),
)
.histogram(
create_histogram("success_time_ms", config)
.display_time_unit(TimeUnit::Milliseconds)
.accept(Metric::StreamNotConnectedTime),
),
)
}
fn create_lag_metrics(cockpit: &mut Cockpit<Metric>, config: &MetrixConfig) {
let panel = Panel::named(AcceptAllLabels, "frame_lag")
.histogram(
create_histogram("stream_us", config)
.display_time_unit(TimeUnit::Microseconds)
.accept((
Metric::StreamBatchFrameReceivedLag,
Metric::StreamKeepAliveFrameReceivedLag,
Metric::StreamInfoFrameReceivedLag,
)),
)
.histogram(
create_histogram("batch_handlers_us", config)
.display_time_unit(TimeUnit::Microseconds)
.accept(Metric::BatchProcessingStartedLag),
)
.histogram(
create_histogram("committer_us", config)
.display_time_unit(TimeUnit::Microseconds)
.accept(Metric::CommitterCursorsReceivedLag),
);
cockpit.add_panel(panel);
}
fn create_batch_metrics(cockpit: &mut Cockpit<Metric>, config: &MetrixConfig) {
let panel = Panel::named(AcceptAllLabels, "batches")
.gauge(create_gauge("in_flight", config).for_label(Metric::BatchesInFlightChanged))
.gauge(create_gauge("in_processing", config).inc_dec_on(
Metric::BatchProcessingStartedLag,
Metric::BatchProcessedBytes,
))
.meter(
Meter::new_with_defaults("per_second").for_label(Metric::BatchProcessingStartedLag),
)
.histogram(
create_histogram("processing_time_us", config)
.display_time_unit(TimeUnit::Microseconds)
.for_label(Metric::BatchProcessedTime),
)
.handler(
ValueMeter::new_with_defaults("bytes_per_second")
.for_label(Metric::BatchProcessedBytes),
)
.histogram(
create_histogram("bytes_per_batch", config).for_label(Metric::BatchProcessedBytes),
);
cockpit.add_panel(panel);
}
fn create_events_metrics(cockpit: &mut Cockpit<Metric>, config: &MetrixConfig) {
let panel = Panel::named(AcceptAllLabels, "events")
.gauge(create_gauge("in_flight", config).for_label(Metric::EventsInFlightChanged))
.handler(
ValueMeter::new_with_defaults("per_second")
.for_label(Metric::BatchProcessedNEvents),
)
.histogram(
create_histogram("per_batch", config).for_label(Metric::BatchProcessedNEvents),
)
.histogram(
create_histogram("deserialization_time_us", config)
.display_time_unit(TimeUnit::Microseconds)
.for_label(Metric::BatchDeserializationTime),
)
.handler(
ValueMeter::new_with_defaults("deserialization_bytes_per_second")
.for_label(Metric::BatchDeserializationBytes),
);
cockpit.add_panel(panel);
}
fn create_committer_metrics(cockpit: &mut Cockpit<Metric>, config: &MetrixConfig) {
let panel = Panel::named(AcceptAllLabels, "committer")
.panel(
Panel::named(AcceptAllLabels, "triggers")
.panel(
Panel::named(
(
Metric::CommitterTriggerDeadlineBatchesCount,
Metric::CommitterTriggerDeadlineEventsCount,
),
"deadline",
)
.meter(
Meter::new_with_defaults("occurrences_per_second")
.for_label(Metric::CommitterTriggerDeadlineBatchesCount),
)
.handler(
ValueMeter::new_with_defaults("batches_per_second")
.for_label(Metric::CommitterTriggerDeadlineBatchesCount),
)
.histogram(
create_histogram("batches_distribution", config)
.for_label(Metric::CommitterTriggerDeadlineBatchesCount),
)
.handler(
ValueMeter::new_with_defaults("events_per_second")
.for_label(Metric::CommitterTriggerDeadlineEventsCount),
)
.histogram(
create_histogram("events_distribution", config)
.for_label(Metric::CommitterTriggerDeadlineEventsCount),
),
)
.panel(
Panel::named(
(
Metric::CommitterTriggerBatchesBatchesCount,
Metric::CommitterTriggerBatchesEventsCount,
),
"batches",
)
.meter(
Meter::new_with_defaults("occurrences_per_second")
.for_label(Metric::CommitterTriggerBatchesBatchesCount),
)
.handler(
ValueMeter::new_with_defaults("batches_per_second")
.for_label(Metric::CommitterTriggerBatchesBatchesCount),
)
.histogram(
create_histogram("batches_distribution", config)
.for_label(Metric::CommitterTriggerBatchesBatchesCount),
)
.handler(
ValueMeter::new_with_defaults("events_per_second")
.for_label(Metric::CommitterTriggerBatchesEventsCount),
)
.histogram(
create_histogram("events_distribution", config)
.for_label(Metric::CommitterTriggerBatchesEventsCount),
),
)
.panel(
Panel::named(
(
Metric::CommitterTriggerEventsBatchesCount,
Metric::CommitterTriggerEventsEventsCount,
),
"events",
)
.meter(
Meter::new_with_defaults("occurrences_per_second")
.for_label(Metric::CommitterTriggerEventsBatchesCount),
)
.handler(
ValueMeter::new_with_defaults("batches_per_second")
.for_label(Metric::CommitterTriggerEventsBatchesCount),
)
.histogram(
create_histogram("batches_distribution", config)
.for_label(Metric::CommitterTriggerEventsBatchesCount),
)
.handler(
ValueMeter::new_with_defaults("events_per_second")
.for_label(Metric::CommitterTriggerEventsEventsCount),
)
.histogram(
create_histogram("events_distribution", config)
.for_label(Metric::CommitterTriggerEventsEventsCount),
),
),
)
.panel(
Panel::named(AcceptAllLabels, "cursors")
.meter(
Meter::new_with_defaults("received_per_second")
.for_label(Metric::CommitterCursorsReceivedLag),
)
.handler(
ValueMeter::new_with_defaults("committed_per_second")
.for_label(Metric::CommitterCursorsCommittedCount),
)
.handler(
ValueMeter::new_with_defaults("not_committed_per_second")
.for_label(Metric::CommitterCursorsNotCommittedCount),
)
.handler(
ValueMeter::new_with_defaults("attempt_failed_per_second")
                        .for_label(Metric::CommitterAttemptFailedCount),
),
)
.panel(
Panel::named(
(
Metric::CommitterFirstCursorAgeOnCommitAttempt,
Metric::CommitterLastCursorAgeOnCommitAttempt,
),
"cursor_ages_on_commit_attempt",
)
.histogram(
create_histogram("first_ms", config)
.display_time_unit(TimeUnit::Milliseconds)
.for_label(Metric::CommitterFirstCursorAgeOnCommitAttempt),
)
.histogram(
create_histogram("last_ms", config)
.display_time_unit(TimeUnit::Milliseconds)
.for_label(Metric::CommitterLastCursorAgeOnCommitAttempt),
),
)
.panel(
Panel::named(Metric::CommitterCursorsCommittedTime, "committed")
.meter(
Meter::new_with_defaults("per_second")
.for_label(Metric::CommitterCursorsCommittedTime),
)
.histogram(
create_histogram("latency_ms", config)
.display_time_unit(TimeUnit::Milliseconds)
.for_label(Metric::CommitterCursorsCommittedTime),
),
)
.panel(
Panel::named(Metric::CommitterCursorsNotCommittedTime, "not_committed")
.meter(
Meter::new_with_defaults("per_second")
.for_label(Metric::CommitterCursorsNotCommittedTime),
)
.histogram(
create_histogram("latency_ms", config)
.display_time_unit(TimeUnit::Milliseconds)
.for_label(Metric::CommitterCursorsNotCommittedTime),
),
)
.panel(
Panel::named(
(
Metric::UncommittedBatchesChanged,
Metric::UncommittedEventsChanged,
),
"uncommitted",
)
.gauge(create_gauge("batches", config).for_label(Metric::UncommittedBatchesChanged))
.gauge(create_gauge("events", config).for_label(Metric::UncommittedEventsChanged)),
)
.panel(
Panel::named(
(
Metric::CommitterAttemptFailedCount,
Metric::CommitterAttemptFailedTime,
),
"failed_attempts",
)
.meter(
Meter::new_with_defaults("per_second")
.for_label(Metric::CommitterAttemptFailedCount),
)
.histogram(
create_histogram("latency_ms", config)
.display_time_unit(TimeUnit::Milliseconds)
.for_label(Metric::CommitterAttemptFailedTime),
),
);
cockpit.add_panel(panel);
}
fn create_event_type_partition_metrics(cockpit: &mut Cockpit<Metric>, config: &MetrixConfig) {
let panel = Panel::named(AcceptAllLabels, "event_type_partitions").gauge(
create_gauge("active", config).inc_dec_on(
Metric::EventTypePartitionActivated,
Metric::EventTypePartitionDeactivatedAfter,
),
);
cockpit.add_panel(panel);
}
fn create_gauge(name: &str, config: &MetrixConfig) -> Gauge {
let tracking_seconds = config.gauge_tracking_secs.unwrap_or_default();
Gauge::new(name)
.tracking(tracking_seconds.into_inner() as usize)
.group_values(true)
}
fn create_staircase_timer(name: &str, config: &MetrixConfig) -> StaircaseTimer {
let switch_off_after = config.alert_duration_secs.unwrap_or_default();
StaircaseTimer::new(name).switch_off_after(switch_off_after.into())
}
fn create_histogram(name: &str, config: &MetrixConfig) -> Histogram {
let inactivity_dur = config
.histogram_inactivity_reset_secs
.unwrap_or_default()
.into_duration();
Histogram::new(name)
.inactivity_limit(inactivity_dur)
.reset_after_inactivity(true)
.show_activity_state(false)
}
}
| 39.993124 | 100 | 0.561393 |
6a498ffc363b221e35a59981c33044c996fd8c72 | 3,689 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use env::{emitter::Emitter, Env};
use error::{Error, Result};
use hhbc::{hhas_attribute::HhasAttribute, hhbc_id, typed_value::TypedValue};
use naming_special_names::user_attributes as ua;
use naming_special_names_rust as naming_special_names;
use oxidized::ast as a;
pub fn from_asts<'arena, 'decl>(
e: &mut Emitter<'arena, 'decl>,
attrs: &[a::UserAttribute],
) -> Result<Vec<HhasAttribute<'arena>>> {
attrs.iter().map(|attr| from_ast(e, attr)).collect()
}
pub fn from_ast<'arena, 'decl>(
e: &mut Emitter<'arena, 'decl>,
attr: &a::UserAttribute,
) -> Result<HhasAttribute<'arena>> {
let arguments = constant_folder::literals_from_exprs(
&mut attr.params.clone(),
e,
)
.map_err(|err| {
assert_eq!(
err,
constant_folder::Error::UserDefinedConstant,
"literals_from_expr should have panicked for an error other than UserDefinedConstant"
);
Error::fatal_parse(&attr.name.0, "Attribute arguments must be literals")
})?;
let fully_qualified_id = if attr.name.1.starts_with("__") {
// don't do anything to builtin attributes
&attr.name.1
} else {
hhbc_id::class::ClassType::from_ast_name_and_mangle(e.alloc, &attr.name.1).unsafe_as_str()
};
Ok(HhasAttribute {
name: e.alloc.alloc_str(fully_qualified_id).into(),
arguments: e.alloc.alloc_slice_fill_iter(arguments.into_iter()).into(),
})
}
/// Adds an `__Reified` attribute for functions and classes with reified type
/// parameters. The arguments to `__Reified` are the number of type parameters
/// followed by the indices of these reified type parameters and whether they
/// are soft reified or not.
pub fn add_reified_attribute<'arena>(
alloc: &'arena bumpalo::Bump,
tparams: &[a::Tparam],
) -> Option<HhasAttribute<'arena>> {
let reified_data: Vec<(usize, bool, bool)> = tparams
.iter()
.enumerate()
.filter_map(|(i, tparam)| {
if tparam.reified == a::ReifyKind::Erased {
None
} else {
let soft = tparam.user_attributes.iter().any(|a| a.name.1 == ua::SOFT);
let warn = tparam.user_attributes.iter().any(|a| a.name.1 == ua::WARN);
Some((i, soft, warn))
}
})
.collect();
if reified_data.is_empty() {
return None;
}
let name = "__Reified".into();
let bool2i64 = |b| b as i64;
// NOTE(hrust) hopefully faster than .into_iter().flat_map(...).collect()
let mut arguments =
bumpalo::collections::vec::Vec::with_capacity_in(reified_data.len() * 3 + 1, alloc);
arguments.push(TypedValue::Int(tparams.len() as i64));
for (i, soft, warn) in reified_data.into_iter() {
arguments.push(TypedValue::Int(i as i64));
arguments.push(TypedValue::Int(bool2i64(soft)));
arguments.push(TypedValue::Int(bool2i64(warn)));
}
Some(HhasAttribute {
name,
arguments: arguments.into_bump_slice().into(),
})
}
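// Example (informal): for tparams `<reify T0, T1, <<__Soft>> reify T2>`, where
// `T1` is erased, the attribute arguments built above would be
// `[3, 0, 0, 0, 2, 1, 0]` -- the total tparam count followed by one
// `(index, soft, warn)` triple per non-erased parameter.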
pub fn add_reified_parent_attribute<'a, 'arena>(
env: &Env<'a, 'arena>,
extends: &[a::Hint],
) -> Option<HhasAttribute<'arena>> {
if let Some((_, hl)) = extends.first().and_then(|h| h.1.as_happly()) {
if emit_expression::has_non_tparam_generics(env, hl) {
return Some(HhasAttribute {
name: "__HasReifiedParent".into(),
arguments: ffi::Slice::empty(),
});
}
}
None
}
| 35.471154 | 98 | 0.622662 |
d627de24d679451b66a8d9778720593479bc886b | 1,221 | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that the trace_macros feature gate is on.
fn main() {
trace_macros!(); //~ ERROR `trace_macros` is not stable
trace_macros!(1); //~ ERROR `trace_macros` is not stable
trace_macros!(ident); //~ ERROR `trace_macros` is not stable
trace_macros!(for); //~ ERROR `trace_macros` is not stable
trace_macros!(true,); //~ ERROR `trace_macros` is not stable
trace_macros!(false 1); //~ ERROR `trace_macros` is not stable
// Errors are signalled early for the above, before expansion.
// See trace_macros-gate2 and trace_macros-gate3. for examples
// of the below being caught.
macro_rules! expando {
($x: ident) => { trace_macros!($x) }
}
expando!(true); //~ ERROR `trace_macros` is not stable
}
| 39.387097 | 68 | 0.694513 |
16263eed8175ac41590879c04622b68ea4fc1f9a | 3,460 | use std::mem;
use std::ops::{Deref, DerefMut};
use core_foundation::base::{mach_port_t, kCFNull, kCFAllocatorDefault, CFType, TCFType};
use core_foundation::dictionary::{CFDictionary, CFMutableDictionary, CFMutableDictionaryRef};
use core_foundation::string::CFString;
use mach::{port, mach_port, kern_return, traps};
use crate::Result;
use super::{sys};
#[derive(Debug)]
pub struct IoMasterPort(mach_port_t);
impl IoMasterPort {
pub fn new() -> Result<IoMasterPort> {
let mut master_port: port::mach_port_t = port::MACH_PORT_NULL;
unsafe {
kern_try!(sys::IOMasterPort(sys::kIOMasterPortDefault, &mut master_port));
}
Ok(IoMasterPort(master_port))
}
pub fn get_services(&self) -> Result<IoIterator> {
let service = unsafe {
let ret = sys::IOServiceMatching(sys::IOPM_SERVICE_NAME);
assert_ne!(ret as *const _, kCFNull);
ret
};
let mut iterator = IoIterator::default();
unsafe {
kern_try!(sys::IOServiceGetMatchingServices(self.0, service, &mut *iterator));
}
Ok(iterator)
}
}
impl Drop for IoMasterPort {
fn drop(&mut self) {
let result = unsafe {
mach_port::mach_port_deallocate(traps::mach_task_self(), self.0)
};
assert_eq!(result, kern_return::KERN_SUCCESS);
}
}
#[derive(Debug)]
pub struct IoObject(sys::io_object_t);
impl IoObject {
    /// Returns a typed dictionary with this object's properties.
    /// In our case all keys are CFStrings, so there is no need to return
    /// an untyped dict here.
pub fn properties(&self) -> Result<CFDictionary<CFString, CFType>> {
unsafe {
// MSRV is 1.32 and `std::mem::MaybeUninit` appeared only at 1.36
// TODO: Switch to `MaybeUninit` as soon as MSRV will be bumped.
#[allow(deprecated)]
let mut props: CFMutableDictionaryRef = mem::uninitialized();
kern_try!(sys::IORegistryEntryCreateCFProperties(self.0, &mut props,
kCFAllocatorDefault, 0));
Ok(CFMutableDictionary::wrap_under_create_rule(props).to_immutable())
}
}
}
impl Drop for IoObject {
fn drop(&mut self) {
let result = unsafe {
sys::IOObjectRelease(self.0)
};
assert_eq!(result, kern_return::KERN_SUCCESS);
}
}
#[derive(Debug)]
pub struct IoIterator(sys::io_iterator_t);
impl Deref for IoIterator {
type Target = sys::io_iterator_t;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for IoIterator {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl Iterator for IoIterator {
type Item = IoObject;
fn next(&mut self) -> Option<Self::Item> {
match unsafe { sys::IOIteratorNext(self.0) } {
            0 => None, // TODO: shouldn't there be some `NULL` constant for this?
io_object => Some(IoObject(io_object))
}
}
}
impl Drop for IoIterator {
fn drop(&mut self) {
let result = unsafe {
sys::IOObjectRelease(self.0)
};
assert_eq!(result, kern_return::KERN_SUCCESS);
}
}
impl Default for IoIterator {
    // This is extremely unsafe: the inner field MUST be initialized
    // before `Drop::drop` eventually runs
fn default() -> IoIterator {
let inner = unsafe {
mem::zeroed()
};
IoIterator(inner)
}
}
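// Sketch: enumerating the IOPM services matched above and reading each
// service's property dictionary. Assumes `crate::Result` defaults its error
// type; the function name is illustrative.
//
// fn dump_pm_services() -> Result<()> {
//     let port = IoMasterPort::new()?;
//     for service in port.get_services()? {
//         let props = service.properties()?;
//         // `props` is a `CFDictionary<CFString, CFType>`; look up keys here.
//         let _ = props;
//     }
//     Ok(())
// }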
| 26.412214 | 93 | 0.613873 |
ccb0044df9e55da91568e5b45be0f1df5be47c72 | 319 | use yew_router::{switch::Permissive, Switch};
#[derive(Debug, Clone, Switch)]
pub enum AppRoute {
#[to = "/login!"]
Login,
#[to = "/dashboard!"]
Dashboard,
#[to = "/list/{id}"]
List { id: usize },
#[to = "/user!"]
User,
#[to = "/page-not-found"]
NotFound(Permissive<String>),
}
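// Note (informal): with the `Switch` derive above, a path such as "/list/42"
// resolves to `AppRoute::List { id: 42 }`; the trailing `!` marks routes that
// must match exactly, and `Permissive` lets the not-found variant carry the
// unmatched route as an optional `String`.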
| 19.9375 | 45 | 0.53605 |
4b0a3e7de797b9d70300490fd294b0831107f85e | 14,526 | //! Instruction Set Architectures.
//!
//! The `isa` module provides a `TargetIsa` trait which provides the behavior specialization needed
//! by the ISA-independent code generator. The sub-modules of this module provide definitions for
//! the instruction sets that Cranelift can target. Each sub-module has its own implementation of
//! `TargetIsa`.
//!
//! # Constructing a `TargetIsa` instance
//!
//! The target ISA is built from the following information:
//!
//! - The name of the target ISA as a string. Cranelift is a cross-compiler, so the ISA to target
//! can be selected dynamically. Individual ISAs can be left out when Cranelift is compiled, so a
//! string is used to identify the proper sub-module.
//! - Values for settings that apply to all ISAs. This is represented by a `settings::Flags`
//! instance.
//! - Values for ISA-specific settings.
//!
//! The `isa::lookup()` function is the main entry point which returns an `isa::Builder`
//! appropriate for the requested ISA:
//!
//! ```
//! # extern crate cranelift_codegen;
//! # #[macro_use] extern crate target_lexicon;
//! use cranelift_codegen::isa;
//! use cranelift_codegen::settings::{self, Configurable};
//! use std::str::FromStr;
//! use target_lexicon::Triple;
//!
//! let shared_builder = settings::builder();
//! let shared_flags = settings::Flags::new(shared_builder);
//!
//! match isa::lookup(triple!("riscv32")) {
//! Err(_) => {
//! // The RISC-V target ISA is not available.
//! }
//! Ok(mut isa_builder) => {
//! isa_builder.set("supports_m", "on");
//! let isa = isa_builder.finish(shared_flags);
//! }
//! }
//! ```
//!
//! The configured target ISA trait object is a `Box<TargetIsa>` which can be used for multiple
//! concurrent function compilations.
pub use crate::isa::call_conv::CallConv;
pub use crate::isa::constraints::{
BranchRange, ConstraintKind, OperandConstraint, RecipeConstraints,
};
pub use crate::isa::encoding::{base_size, EncInfo, Encoding};
pub use crate::isa::registers::{regs_overlap, RegClass, RegClassIndex, RegInfo, RegUnit};
pub use crate::isa::stack::{StackBase, StackBaseMask, StackRef};
use crate::binemit;
use crate::flowgraph;
use crate::ir;
use crate::isa::enc_tables::Encodings;
use crate::regalloc;
use crate::result::CodegenResult;
use crate::settings;
use crate::settings::SetResult;
use crate::timing;
use alloc::borrow::Cow;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::fmt;
use target_lexicon::{triple, Architecture, PointerWidth, Triple};
use thiserror::Error;
#[cfg(feature = "riscv")]
mod riscv;
#[cfg(feature = "x86")]
mod x86;
#[cfg(feature = "arm32")]
mod arm32;
#[cfg(feature = "arm64")]
mod arm64;
mod call_conv;
mod constraints;
mod enc_tables;
mod encoding;
pub mod registers;
mod stack;
/// Returns a builder that can create a corresponding `TargetIsa`
/// or `Err(LookupError::SupportDisabled)` if not enabled.
macro_rules! isa_builder {
($name: ident, $feature: tt, $triple: ident) => {{
#[cfg(feature = $feature)]
{
Ok($name::isa_builder($triple))
}
#[cfg(not(feature = $feature))]
{
Err(LookupError::SupportDisabled)
}
}};
}
/// Look for an ISA for the given `triple`.
/// Return a builder that can create a corresponding `TargetIsa`.
pub fn lookup(triple: Triple) -> Result<Builder, LookupError> {
match triple.architecture {
Architecture::Riscv32 | Architecture::Riscv64 => isa_builder!(riscv, "riscv", triple),
Architecture::I386 | Architecture::I586 | Architecture::I686 | Architecture::X86_64 => {
isa_builder!(x86, "x86", triple)
}
Architecture::Arm { .. } => isa_builder!(arm32, "arm32", triple),
Architecture::Aarch64 { .. } => isa_builder!(arm64, "arm64", triple),
_ => Err(LookupError::Unsupported),
}
}
/// Look for a supported ISA with the given `name`.
/// Return a builder that can create a corresponding `TargetIsa`.
pub fn lookup_by_name(name: &str) -> Result<Builder, LookupError> {
use alloc::str::FromStr;
lookup(triple!(name))
}
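// Sketch (mirrors the module-level example above): resolving an ISA by name
// rather than by a `Triple`. `settings::Configurable` must be in scope for
// `set`; the `supports_m` flag is RISC-V specific.
//
// let mut isa_builder = lookup_by_name("riscv32").expect("riscv support enabled");
// isa_builder.set("supports_m", "on").unwrap();
// let isa = isa_builder.finish(settings::Flags::new(settings::builder()));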
/// Describes reason for target lookup failure
#[derive(Error, PartialEq, Eq, Copy, Clone, Debug)]
pub enum LookupError {
/// Support for this target was disabled in the current build.
#[error("Support for this target is disabled")]
SupportDisabled,
/// Support for this target has not yet been implemented.
#[error("Support for this target has not been implemented yet")]
Unsupported,
}
/// Builder for a `TargetIsa`.
/// Modify the ISA-specific settings before creating the `TargetIsa` trait object with `finish`.
pub struct Builder {
triple: Triple,
setup: settings::Builder,
constructor: fn(Triple, settings::Flags, settings::Builder) -> Box<dyn TargetIsa>,
}
impl Builder {
/// Combine the ISA-specific settings with the provided ISA-independent settings and allocate a
/// fully configured `TargetIsa` trait object.
pub fn finish(self, shared_flags: settings::Flags) -> Box<dyn TargetIsa> {
(self.constructor)(self.triple, shared_flags, self.setup)
}
}
impl settings::Configurable for Builder {
fn set(&mut self, name: &str, value: &str) -> SetResult<()> {
self.setup.set(name, value)
}
fn enable(&mut self, name: &str) -> SetResult<()> {
self.setup.enable(name)
}
}
/// After determining that an instruction doesn't have an encoding, how should we proceed to
/// legalize it?
///
/// The `Encodings` iterator returns a legalization function to call.
pub type Legalize =
fn(ir::Inst, &mut ir::Function, &mut flowgraph::ControlFlowGraph, &dyn TargetIsa) -> bool;
/// This struct provides information that a frontend may need to know about a target to
/// produce Cranelift IR for the target.
#[derive(Clone, Copy)]
pub struct TargetFrontendConfig {
/// The default calling convention of the target.
pub default_call_conv: CallConv,
/// The pointer width of the target.
pub pointer_width: PointerWidth,
}
impl TargetFrontendConfig {
/// Get the pointer type of this target.
pub fn pointer_type(self) -> ir::Type {
ir::Type::int(u16::from(self.pointer_bits())).unwrap()
}
/// Get the width of pointers on this target, in units of bits.
pub fn pointer_bits(self) -> u8 {
self.pointer_width.bits()
}
/// Get the width of pointers on this target, in units of bytes.
pub fn pointer_bytes(self) -> u8 {
self.pointer_width.bytes()
}
}
/// Methods that are specialized to a target ISA. Implies a Display trait that shows the
/// shared flags, as well as any isa-specific flags.
pub trait TargetIsa: fmt::Display + Send + Sync {
/// Get the name of this ISA.
fn name(&self) -> &'static str;
/// Get the target triple that was used to make this trait object.
fn triple(&self) -> &Triple;
/// Get the ISA-independent flags that were used to make this trait object.
fn flags(&self) -> &settings::Flags;
/// Get the default calling convention of this target.
fn default_call_conv(&self) -> CallConv {
CallConv::triple_default(self.triple())
}
/// Get the pointer type of this ISA.
fn pointer_type(&self) -> ir::Type {
ir::Type::int(u16::from(self.pointer_bits())).unwrap()
}
/// Get the width of pointers on this ISA.
fn pointer_width(&self) -> PointerWidth {
self.triple().pointer_width().unwrap()
}
/// Get the width of pointers on this ISA, in units of bits.
fn pointer_bits(&self) -> u8 {
self.pointer_width().bits()
}
/// Get the width of pointers on this ISA, in units of bytes.
fn pointer_bytes(&self) -> u8 {
self.pointer_width().bytes()
}
/// Get the information needed by frontends producing Cranelift IR.
fn frontend_config(&self) -> TargetFrontendConfig {
TargetFrontendConfig {
default_call_conv: self.default_call_conv(),
pointer_width: self.pointer_width(),
}
}
/// Does the CPU implement scalar comparisons using a CPU flags register?
fn uses_cpu_flags(&self) -> bool {
false
}
/// Does the CPU implement multi-register addressing?
fn uses_complex_addresses(&self) -> bool {
false
}
/// Get a data structure describing the registers in this ISA.
fn register_info(&self) -> RegInfo;
/// Returns an iterator over legal encodings for the instruction.
fn legal_encodings<'a>(
&'a self,
func: &'a ir::Function,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type,
) -> Encodings<'a>;
/// Encode an instruction after determining it is legal.
///
/// If `inst` can legally be encoded in this ISA, produce the corresponding `Encoding` object.
    /// Otherwise, return a `Legalize` action.
///
/// This is also the main entry point for determining if an instruction is legal.
fn encode(
&self,
func: &ir::Function,
inst: &ir::InstructionData,
ctrl_typevar: ir::Type,
) -> Result<Encoding, Legalize> {
let mut iter = self.legal_encodings(func, inst, ctrl_typevar);
iter.next().ok_or_else(|| iter.legalize())
}
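    // Illustrative call pattern (added for exposition; `cfg` is an assumed
    // `ControlFlowGraph` for the function being compiled):
    //
    //     match isa.encode(&func, &func.dfg[inst], ctrl_typevar) {
    //         Ok(encoding) => { /* record the encoding */ }
    //         Err(legalize) => { legalize(inst, &mut func, &mut cfg, isa); }
    //     }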
/// Get a data structure describing the instruction encodings in this ISA.
fn encoding_info(&self) -> EncInfo;
/// Legalize a function signature.
///
/// This is used to legalize both the signature of the function being compiled and any called
/// functions. The signature should be modified by adding `ArgumentLoc` annotations to all
/// arguments and return values.
///
/// Arguments with types that are not supported by the ABI can be expanded into multiple
/// arguments:
///
/// - Integer types that are too large to fit in a register can be broken into multiple
/// arguments of a smaller integer type.
    /// - Floating point types can be bit-cast to an integer type of the same size, and possibly
    ///   broken into smaller integer types.
/// - Vector types can be bit-cast and broken down into smaller vectors or scalars.
///
/// The legalizer will adapt argument and return values as necessary at all ABI boundaries.
///
/// When this function is called to legalize the signature of the function currently being
/// compiled, `current` is true. The legalized signature can then also contain special purpose
/// arguments and return values such as:
///
/// - A `link` argument representing the link registers on RISC architectures that don't push
/// the return address on the stack.
/// - A `link` return value which will receive the value that was passed to the `link`
/// argument.
/// - An `sret` argument can be added if one wasn't present already. This is necessary if the
/// signature returns more values than registers are available for returning values.
/// - An `sret` return value can be added if the ABI requires a function to return its `sret`
/// argument in a register.
///
/// Arguments and return values for the caller's frame pointer and other callee-saved registers
/// should not be added by this function. These arguments are not added until after register
/// allocation.
fn legalize_signature(&self, sig: &mut Cow<ir::Signature>, current: bool);
/// Get the register class that should be used to represent an ABI argument or return value of
/// type `ty`. This should be the top-level register class that contains the argument
/// registers.
///
/// This function can assume that it will only be asked to provide register classes for types
/// that `legalize_signature()` produces in `ArgumentLoc::Reg` entries.
fn regclass_for_abi_type(&self, ty: ir::Type) -> RegClass;
/// Get the set of allocatable registers that can be used when compiling `func`.
///
/// This set excludes reserved registers like the stack pointer and other special-purpose
/// registers.
fn allocatable_registers(&self, func: &ir::Function) -> regalloc::RegisterSet;
/// Compute the stack layout and insert prologue and epilogue code into `func`.
///
/// Return an error if the stack frame is too large.
fn prologue_epilogue(&self, func: &mut ir::Function) -> CodegenResult<()> {
let _tt = timing::prologue_epilogue();
// This default implementation is unlikely to be good enough.
use crate::ir::stackslot::{StackOffset, StackSize};
use crate::stack_layout::layout_stack;
let word_size = StackSize::from(self.pointer_bytes());
// Account for the SpiderMonkey standard prologue pushes.
if func.signature.call_conv.extends_baldrdash() {
let bytes = StackSize::from(self.flags().baldrdash_prologue_words()) * word_size;
let mut ss = ir::StackSlotData::new(ir::StackSlotKind::IncomingArg, bytes);
ss.offset = Some(-(bytes as StackOffset));
func.stack_slots.push(ss);
}
let is_leaf = func.is_leaf();
layout_stack(&mut func.stack_slots, is_leaf, word_size)?;
Ok(())
}
/// Emit binary machine code for a single instruction into the `sink` trait object.
///
/// Note that this will call `put*` methods on the `sink` trait object via its vtable which
/// is not the fastest way of emitting code.
///
/// This function is under the "testing_hooks" feature, and is only suitable for use by
/// test harnesses. It increases code size, and is inefficient.
#[cfg(feature = "testing_hooks")]
fn emit_inst(
&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut dyn binemit::CodeSink,
);
/// Emit a whole function into memory.
fn emit_function_to_memory(&self, func: &ir::Function, sink: &mut binemit::MemoryCodeSink);
/// IntCC condition for Unsigned Addition Overflow (Carry).
fn unsigned_add_overflow_condition(&self) -> ir::condcodes::IntCC;
/// IntCC condition for Unsigned Subtraction Overflow (Borrow/Carry).
fn unsigned_sub_overflow_condition(&self) -> ir::condcodes::IntCC;
/// Emit unwind information for the given function.
///
/// Only some calling conventions (e.g. Windows fastcall) will have unwind information.
fn emit_unwind_info(&self, _func: &ir::Function, _mem: &mut Vec<u8>) {
// No-op by default
}
}
| 37.341902 | 99 | 0.666185 |
1d898c6423dba88b26827e4c37fddbe0a44618fd | 4,663 | // SPDX-License-Identifier: Apache-2.0
use super::cpuid_page::CpuidPage;
use super::snp::firmware::Firmware;
use super::snp::launch::*;
use super::SnpKeepPersonality;
use crate::backend::kvm::builder::kvm_try_from_builder;
use crate::backend::kvm::mem::Region;
use std::convert::TryFrom;
use std::sync::{Arc, RwLock};
use anyhow::Context;
use anyhow::{Error, Result};
use kvm_ioctls::{Kvm, VmFd};
use mmarinus::{perms, Map};
use primordial::Page;
use sallyport::elf::pf::snp::{CPUID, SECRETS};
use x86_64::VirtAddr;
pub struct Builder {
kvm_fd: Kvm,
launcher: Launcher<Started, VmFd, Firmware>,
regions: Vec<Region>,
sallyports: Vec<Option<VirtAddr>>,
}
impl TryFrom<super::super::kvm::config::Config> for Builder {
type Error = Error;
fn try_from(_config: super::super::kvm::config::Config) -> Result<Self> {
let kvm_fd = Kvm::new()?;
let vm_fd = kvm_fd.create_vm()?;
let sev = Firmware::open()?;
let launcher = Launcher::new(vm_fd, sev)?;
let start = Start {
policy: Policy {
flags: PolicyFlags::SMT,
..Default::default()
},
            gosvw: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], // guest OS-visible workarounds (GOSVW)
..Default::default()
};
let launcher = launcher.start(start)?;
Ok(Builder {
kvm_fd,
launcher,
regions: Vec::new(),
sallyports: Vec::new(),
})
}
}
impl super::super::Mapper for Builder {
type Config = super::super::kvm::config::Config;
type Output = Arc<dyn super::super::Keep>;
fn map(
&mut self,
mut pages: Map<perms::ReadWrite>,
to: usize,
with: u32,
) -> anyhow::Result<()> {
// Ignore regions with no pages.
if pages.is_empty() {
return Ok(());
}
let mem_region = super::super::kvm::builder::kvm_builder_map(
&mut self.sallyports,
self.launcher.as_mut(),
&mut pages,
to,
with,
self.regions.len() as _,
)?;
let dp = VmplPerms::empty();
if with & CPUID != 0 {
assert_eq!(pages.len(), Page::SIZE);
let mut cpuid_page = CpuidPage::default();
cpuid_page.import_from_kvm(&mut self.kvm_fd)?;
let guest_cpuid_page = pages.as_mut_ptr() as *mut CpuidPage;
unsafe {
guest_cpuid_page.write(cpuid_page);
}
let update = Update::new(
to as u64 >> 12,
&pages,
false,
PageType::Cpuid,
(dp, dp, dp),
);
if self.launcher.update_data(update).is_err() {
                // On failure the firmware writes the corrected CPUID values
                // back into the page, so retrying once with them should succeed.
self.launcher
.update_data(update)
.context("launcher.update_data for CPUID failed")?;
}
} else if with & SECRETS != 0 {
assert_eq!(pages.len(), Page::SIZE);
let update = Update::new(
to as u64 >> 12,
&pages,
false,
PageType::Secrets,
(dp, dp, dp),
);
self.launcher
.update_data(update)
.context("SNP Launcher update_data")?;
} else {
let update = Update::new(
to as u64 >> 12,
&pages,
false,
PageType::Normal,
(dp, dp, dp),
);
self.launcher
.update_data(update)
.context("SNP Launcher update_data")?;
};
self.regions.push(Region::new(mem_region, pages));
Ok(())
}
}
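// Note (added for exposition): `Update::new` takes a guest page frame number,
// which is why every call site above shifts the target byte address right by
// 12 bits; SNP launch updates work at 4 KiB page granularity, so guest
// address 0x8000 becomes page frame 0x8.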
impl TryFrom<Builder> for Arc<dyn super::super::Keep> {
type Error = Error;
fn try_from(mut builder: Builder) -> Result<Self> {
let (vcpu_fd, sallyport_block_start) = kvm_try_from_builder(
&builder.sallyports,
&mut builder.kvm_fd,
builder.launcher.as_mut(),
)?;
let finish = Finish::new(None, None, [0u8; 32]);
let (vm_fd, sev_fd) = builder.launcher.finish(finish)?;
Ok(Arc::new(RwLock::new(super::Keep::<SnpKeepPersonality> {
kvm_fd: builder.kvm_fd,
vm_fd,
cpu_fds: vec![vcpu_fd],
regions: builder.regions,
sallyports: builder.sallyports,
sallyport_start: sallyport_block_start,
personality: SnpKeepPersonality { _sev_fd: sev_fd },
})))
}
}
| 27.591716 | 77 | 0.514261 |
6773ad15f95283f783b1767311390d6dca413dee | 1,144 | //! SeaORM Entity. Generated by sea-orm-codegen 0.6.0
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "underscores")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: u32,
#[sea_orm(column_name = "a_b_c_d")]
pub a_b_c_d: i32,
#[sea_orm(column_name = "a_b_c_dd")]
pub a_b_c_dd: i32,
#[sea_orm(column_name = "a_b_cc_d")]
pub a_b_cc_d: i32,
#[sea_orm(column_name = "a_bb_c_d")]
pub a_bb_c_d: i32,
#[sea_orm(column_name = "aa_b_c_d")]
pub aa_b_c_d: i32,
}
#[derive(Copy, Clone, Debug, EnumIter)]
pub enum Relation {}
impl RelationTrait for Relation {
fn def(&self) -> RelationDef {
panic!("No RelationDef")
}
}
impl ActiveModelBehavior for ActiveModel {}
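// Illustrative query sketch (added for exposition; `db` is an assumed
// `DatabaseConnection`, not part of the generated entity):
//
//     let rows: Vec<Model> = Entity::find().all(&db).await?;
//     let one: Option<Model> = Entity::find_by_id(42).one(&db).await?;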
#[cfg(test)]
mod tests {
use super::*;
use sea_orm::Iterable;
#[test]
fn column_names() {
assert_eq!(
Column::iter().map(|c| c.to_string()).collect::<Vec<_>>(),
vec!["id", "a_b_c_d", "a_b_c_dd", "a_b_cc_d", "a_bb_c_d", "aa_b_c_d"]
)
}
}
| 24.340426 | 81 | 0.621503 |
21f3a15509ac4f7c7412db1ae4675ed3c661a0ad | 1,605 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
use core::fmt;
use crate::io::prelude::*;
#[cfg(feature = "stdio")]
use crate::sys::stdio::panic_output;
#[cfg(feature = "stdio")]
pub fn dumb_print(args: fmt::Arguments<'_>) {
if let Some(mut out) = panic_output() {
let _ = out.write_fmt(args);
}
}
#[cfg(not(feature = "stdio"))]
pub fn dumb_print(_args: fmt::Arguments<'_>) {
    // No stdio is available in this configuration, so the output is dropped.
}
// Other platforms should use the appropriate platform-specific mechanism for
// aborting the process. If no platform-specific mechanism is available,
// crate::intrinsics::abort() may be used instead. The above implementations cover
// all targets currently supported by libstd.
pub fn abort(args: fmt::Arguments<'_>) -> ! {
dumb_print(format_args!("fatal runtime error: {}\n", args));
unsafe {
crate::sys::abort_internal();
}
}
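// Illustrative call (added for exposition): runtime code reports a fatal error
// via e.g. `abort(format_args!("stack overflow"))`, which prints
// "fatal runtime error: stack overflow" through `dumb_print` before
// terminating via the platform abort hook.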
| 34.891304 | 83 | 0.711526 |
5d23d8ee854dac726de6a764ba545c88e7d7e817 | 12,758 | pub mod infer;
mod patterns;
use super::*;
#[cfg(feature = "dtype-date")]
use crate::chunkedarray::date::naive_date_to_date;
#[cfg(feature = "dtype-time")]
use crate::chunkedarray::time::time_to_time64ns;
use chrono::ParseError;
pub use patterns::Pattern;
#[cfg(feature = "dtype-time")]
fn time_pattern<F, K>(val: &str, convert: F) -> Option<&'static str>
// (string, fmt) -> result
where
F: Fn(&str, &str) -> chrono::ParseResult<K>,
{
for fmt in ["%T", "%T%.3f", "%T%.6f", "%T%.9f"] {
if convert(val, fmt).is_ok() {
return Some(fmt);
}
}
None
}
fn datetime_pattern<F, K>(val: &str, convert: F) -> Option<&'static str>
// (string, fmt) -> result
where
F: Fn(&str, &str) -> chrono::ParseResult<K>,
{
    for fmt in [
        // 21/12/31 12:54:58
        "%y/%m/%d %H:%M:%S",
        // 2021-12-31 22:58:01
        "%Y-%m-%d %H:%M:%S",
        // 210319 23:58:50
        "%y%m%d %H:%M:%S",
        // 2021/12/31 12:54:58
        "%Y/%m/%d %H:%M:%S",
        // 20210319 23:58:50
        "%Y%m%d %H:%M:%S",
        // 2019-04-18T02:45:55
        "%FT%H:%M:%S",
        // 2019-04-18T02:45:55.555000000
        // microseconds
        "%FT%H:%M:%S.%6f",
        // nanoseconds
        "%FT%H:%M:%S.%9f",
    ] {
if convert(val, fmt).is_ok() {
return Some(fmt);
}
}
None
}
fn date_pattern<F, K>(val: &str, convert: F) -> Option<&'static str>
// (string, fmt) -> result
where
F: Fn(&str, &str) -> chrono::ParseResult<K>,
{
    for fmt in [
        // 2021-12-31
        "%Y-%m-%d",
        // 31-12-2021
        "%d-%m-%Y",
        // 20210319 (2021-03-19)
        "%Y%m%d",
    ] {
if convert(val, fmt).is_ok() {
return Some(fmt);
}
}
None
}
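#[cfg(test)]
mod pattern_sniffing_tests {
    // Minimal test sketch (added for exposition): exercises the private
    // sniffing helpers above. Assumes `NaiveDate`/`NaiveDateTime` are in scope
    // through the parent module's imports.
    use super::*;

    #[test]
    fn sniffs_common_formats() {
        assert_eq!(
            date_pattern("2021-12-31", NaiveDate::parse_from_str),
            Some("%Y-%m-%d")
        );
        assert_eq!(
            datetime_pattern("2021-12-31 22:58:01", NaiveDateTime::parse_from_str),
            Some("%Y-%m-%d %H:%M:%S")
        );
    }
}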
struct ParseErrorByteCopy(ParseErrorKind);
impl From<ParseError> for ParseErrorByteCopy {
fn from(e: ParseError) -> Self {
// we need to do this until chrono ParseErrorKind is public
// blocked by https://github.com/chronotope/chrono/pull/588
unsafe { std::mem::transmute(e) }
}
}
#[allow(dead_code)]
enum ParseErrorKind {
OutOfRange,
Impossible,
NotEnough,
Invalid,
/// The input string has been prematurely ended.
TooShort,
TooLong,
BadFormat,
}
pub trait Utf8Methods {
    #[cfg(feature = "dtype-time")]
    /// Parse string values and return a [`TimeChunked`].
    fn as_time(&self, fmt: Option<&str>) -> Result<TimeChunked>;
    #[cfg(feature = "dtype-date")]
    /// Parse string values and return a [`DateChunked`].
    /// Unlike `as_date`, this function allows matches that do not span the whole string,
    /// e.g. "foo-2021-01-01-bar" could match "2021-01-01".
    fn as_date_not_exact(&self, fmt: Option<&str>) -> Result<DateChunked>;
    #[cfg(feature = "dtype-datetime")]
    /// Parse string values and return a [`DatetimeChunked`].
    /// Unlike `as_datetime`, this function allows matches that do not span the whole string,
    /// e.g. "foo-2021-01-01-bar" could match "2021-01-01".
    fn as_datetime_not_exact(&self, fmt: Option<&str>, tu: TimeUnit) -> Result<DatetimeChunked>;
    #[cfg(feature = "dtype-date")]
    /// Parse string values and return a [`DateChunked`].
    fn as_date(&self, fmt: Option<&str>) -> Result<DateChunked>;
    #[cfg(feature = "dtype-datetime")]
    /// Parse string values and return a [`DatetimeChunked`].
    fn as_datetime(&self, fmt: Option<&str>, tu: TimeUnit) -> Result<DatetimeChunked>;
}
fn get_first_val(ca: &Utf8Chunked) -> Result<&str> {
let idx = match ca.first_non_null() {
Some(idx) => idx,
None => {
return Err(PolarsError::ComputeError(
"Cannot determine date parsing format, all values are null".into(),
))
}
};
let val = ca.get(idx).expect("should not be null");
Ok(val)
}
#[cfg(feature = "dtype-datetime")]
fn sniff_fmt_datetime(ca_utf8: &Utf8Chunked) -> Result<&'static str> {
let val = get_first_val(ca_utf8)?;
if let Some(pattern) = datetime_pattern(val, NaiveDateTime::parse_from_str) {
return Ok(pattern);
}
Err(PolarsError::ComputeError(
"Could not find an appropriate format to parse dates, please define a fmt".into(),
))
}
#[cfg(feature = "dtype-date")]
fn sniff_fmt_date(ca_utf8: &Utf8Chunked) -> Result<&'static str> {
let val = get_first_val(ca_utf8)?;
if let Some(pattern) = date_pattern(val, NaiveDate::parse_from_str) {
return Ok(pattern);
}
Err(PolarsError::ComputeError(
"Could not find an appropriate format to parse dates, please define a fmt".into(),
))
}
#[cfg(feature = "dtype-time")]
fn sniff_fmt_time(ca_utf8: &Utf8Chunked) -> Result<&'static str> {
let val = get_first_val(ca_utf8)?;
if let Some(pattern) = time_pattern(val, NaiveTime::parse_from_str) {
return Ok(pattern);
}
Err(PolarsError::ComputeError(
"Could not find an appropriate format to parse times, please define a fmt".into(),
))
}
impl Utf8Methods for Utf8Chunked {
#[cfg(feature = "dtype-time")]
    /// Parse string values and return a [`TimeChunked`].
fn as_time(&self, fmt: Option<&str>) -> Result<TimeChunked> {
let fmt = match fmt {
Some(fmt) => fmt,
None => sniff_fmt_time(self)?,
};
let mut ca: Int64Chunked = match self.has_validity() {
false => self
.into_no_null_iter()
.map(|s| {
NaiveTime::parse_from_str(s, fmt)
.ok()
.as_ref()
.map(time_to_time64ns)
})
.collect_trusted(),
_ => self
.into_iter()
.map(|opt_s| {
let opt_nd = opt_s.map(|s| {
NaiveTime::parse_from_str(s, fmt)
.ok()
.as_ref()
.map(time_to_time64ns)
});
match opt_nd {
None => None,
Some(None) => None,
Some(Some(nd)) => Some(nd),
}
})
.collect_trusted(),
};
ca.rename(self.name());
Ok(ca.into())
}
#[cfg(feature = "dtype-date")]
    /// Parse string values and return a [`DateChunked`].
    /// Unlike `as_date`, this function allows matches that do not span the whole string,
    /// e.g. "foo-2021-01-01-bar" could match "2021-01-01".
fn as_date_not_exact(&self, fmt: Option<&str>) -> Result<DateChunked> {
let fmt = match fmt {
Some(fmt) => fmt,
None => sniff_fmt_date(self)?,
};
let mut ca: Int32Chunked = self
.into_iter()
.map(|opt_s| match opt_s {
None => None,
Some(mut s) => {
let fmt_len = fmt.len();
                    // `saturating_sub` guards against underflow when the input
                    // is shorter than the format string.
                    for i in 1..(s.len().saturating_sub(fmt_len)) {
if s.is_empty() {
return None;
}
match NaiveDate::parse_from_str(s, fmt).map(naive_date_to_date) {
Ok(nd) => return Some(nd),
Err(e) => {
let e: ParseErrorByteCopy = e.into();
match e.0 {
ParseErrorKind::TooLong => {
s = &s[..s.len() - 1];
}
_ => {
s = &s[i..];
}
}
}
}
}
None
}
})
.collect_trusted();
ca.rename(self.name());
Ok(ca.into())
}
#[cfg(feature = "dtype-datetime")]
    /// Parse string values and return a [`DatetimeChunked`].
    /// Unlike `as_datetime`, this function allows matches that do not span the whole string,
    /// e.g. "foo-2021-01-01-bar" could match "2021-01-01".
fn as_datetime_not_exact(&self, fmt: Option<&str>, tu: TimeUnit) -> Result<DatetimeChunked> {
let fmt = match fmt {
Some(fmt) => fmt,
None => sniff_fmt_datetime(self)?,
};
let func = match tu {
TimeUnit::Nanoseconds => datetime_to_timestamp_ns,
TimeUnit::Microseconds => datetime_to_timestamp_us,
TimeUnit::Milliseconds => datetime_to_timestamp_ms,
};
let mut ca: Int64Chunked = self
.into_iter()
.map(|opt_s| match opt_s {
None => None,
Some(mut s) => {
let fmt_len = fmt.len();
                    // `saturating_sub` guards against underflow when the input
                    // is shorter than the format string.
                    for i in 1..(s.len().saturating_sub(fmt_len)) {
if s.is_empty() {
return None;
}
match NaiveDateTime::parse_from_str(s, fmt).map(func) {
Ok(nd) => return Some(nd),
Err(e) => {
let e: ParseErrorByteCopy = e.into();
match e.0 {
ParseErrorKind::TooLong => {
s = &s[..s.len() - 1];
}
_ => {
s = &s[i..];
}
}
}
}
}
None
}
})
.collect_trusted();
ca.rename(self.name());
Ok(ca.into_datetime(tu, None))
}
#[cfg(feature = "dtype-date")]
    /// Parse string values and return a [`DateChunked`].
fn as_date(&self, fmt: Option<&str>) -> Result<DateChunked> {
let fmt = match fmt {
Some(fmt) => fmt,
None => sniff_fmt_date(self)?,
};
let mut ca: Int32Chunked = match self.has_validity() {
false => self
.into_no_null_iter()
.map(|s| {
NaiveDate::parse_from_str(s, fmt)
.ok()
.map(naive_date_to_date)
})
.collect_trusted(),
_ => self
.into_iter()
.map(|opt_s| {
let opt_nd = opt_s.map(|s| {
NaiveDate::parse_from_str(s, fmt)
.ok()
.map(naive_date_to_date)
});
match opt_nd {
None => None,
Some(None) => None,
Some(Some(nd)) => Some(nd),
}
})
.collect_trusted(),
};
ca.rename(self.name());
Ok(ca.into())
}
#[cfg(feature = "dtype-datetime")]
    /// Parse string values and return a [`DatetimeChunked`].
fn as_datetime(&self, fmt: Option<&str>, tu: TimeUnit) -> Result<DatetimeChunked> {
let fmt = match fmt {
Some(fmt) => fmt,
None => sniff_fmt_datetime(self)?,
};
let func = match tu {
TimeUnit::Nanoseconds => datetime_to_timestamp_ns,
TimeUnit::Microseconds => datetime_to_timestamp_us,
TimeUnit::Milliseconds => datetime_to_timestamp_ms,
};
let mut ca: Int64Chunked = match self.has_validity() {
false => self
.into_no_null_iter()
.map(|s| NaiveDateTime::parse_from_str(s, fmt).ok().map(func))
.collect_trusted(),
_ => self
.into_iter()
.map(|opt_s| {
let opt_nd =
opt_s.map(|s| NaiveDateTime::parse_from_str(s, fmt).ok().map(func));
match opt_nd {
None => None,
Some(None) => None,
Some(Some(nd)) => Some(nd),
}
})
.collect_trusted(),
};
ca.rename(self.name());
Ok(ca.into_datetime(tu, None))
}
}
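// Illustrative usage sketch (added for exposition; the constructor shape is
// assumed from the usual polars `ChunkedArray` API):
//
//     let ca = Utf8Chunked::new("when", &["2021-12-31", "2022-01-01"]);
//     let dates: DateChunked = ca.as_date(Some("%Y-%m-%d"))?;
//     let stamps = ca.as_datetime(None, TimeUnit::Milliseconds)?; // fmt is sniffed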
| 33.397906 | 99 | 0.467001 |
2962eabbc4a8a94a0b7ae43b3677eafb1bec3805 | 14,204 | // This pallet use The Open Runtime Module Library (ORML) which is a community maintained collection of Substrate runtime modules.
// Thanks to all contributors of orml.
// Ref: https://github.com/open-web3-stack/open-runtime-module-library
#![cfg_attr(not(feature = "std"), no_std)]
// Disable the following two lints since they originate from an external macro (namely decl_storage)
#![allow(clippy::string_lit_as_bytes)]
use frame_support::{
decl_error, decl_event, decl_module, decl_storage, ensure,
traits::{Currency, ExistenceRequirement, Get, ReservableCurrency},
IterableStorageDoubleMap, Parameter,
};
use codec::{Decode, Encode};
use sp_runtime::{
traits::{AtLeast32BitUnsigned, Bounded, MaybeSerializeDeserialize, Member, One, Zero},
DispatchError, DispatchResult, RuntimeDebug,
};
use frame_system::{self as system, ensure_signed};
use orml_nft::Module as NftModule;
use pallet_nft::NftClassData;
mod auction;
pub use crate::auction::{Auction, AuctionHandler, Change, OnNewBidResult};
#[cfg(test)]
mod tests;
pub struct AuctionLogicHandler;
#[cfg_attr(feature = "std", derive(PartialEq, Eq))]
#[derive(Encode, Decode, Clone, RuntimeDebug)]
pub struct AuctionItem<AccountId, BlockNumber, Balance, AssetId, ClassId> {
asset_id: AssetId,
class_id: ClassId,
recipient: AccountId,
initial_amount: Balance,
/// Current amount for sale
amount: Balance,
/// Auction start time
start_time: BlockNumber,
end_time: BlockNumber,
}
/// Auction info.
#[cfg_attr(feature = "std", derive(PartialEq, Eq))]
#[derive(Encode, Decode, Clone, RuntimeDebug)]
pub struct AuctionInfo<AccountId, Balance, BlockNumber> {
/// Current bidder and bid price.
pub bid: Option<(AccountId, Balance)>,
/// Define which block this auction will be started.
pub start: BlockNumber,
/// Define which block this auction will be ended.
pub end: Option<BlockNumber>,
}
type ClassIdOf<T> = <T as orml_nft::Config>::ClassId;
type TokenIdOf<T> = <T as orml_nft::Config>::TokenId;
type BalanceOf<T> =
<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
pub trait Config:
frame_system::Config
+ orml_nft::Config<ClassData = NftClassData<BalanceOf<Self>>>
+ pallet_balances::Config
{
type Event: From<Event<Self>> + Into<<Self as frame_system::Config>::Event>;
type AuctionTimeToClose: Get<Self::BlockNumber>;
/// The auction ID type
type AuctionId: Parameter
+ Member
+ AtLeast32BitUnsigned
+ Default
+ Copy
+ MaybeSerializeDeserialize
+ Bounded;
/// The `AuctionHandler` that allow custom bidding logic and handles auction
/// result
type Handler: AuctionHandler<Self::AccountId, Self::Balance, Self::BlockNumber, Self::AuctionId>;
type Currency: Currency<Self::AccountId>;
// /// Weight information for extrinsics in this module.
// type WeightInfo: WeightInfo;
}
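// Illustrative runtime wiring sketch (added for exposition; `Runtime`, `Event`,
// `Balances`, and the block count below are assumptions about the embedding
// chain):
//
//     parameter_types! {
//         pub const AuctionTimeToClose: BlockNumber = 100_800; // ~7 days of 6s blocks
//     }
//     impl auction::Config for Runtime {
//         type Event = Event;
//         type AuctionTimeToClose = AuctionTimeToClose;
//         type AuctionId = u64;
//         type Handler = auction::Module<Runtime>;
//         type Currency = Balances;
//     }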
decl_storage! {
trait Store for Module<T: Config> as Auction {
/// Stores on-going and future auctions. Closed auction are removed.
pub Auctions get(fn auctions): map hasher(twox_64_concat) T::AuctionId => Option<AuctionInfo<T::AccountId, T::Balance, T::BlockNumber>>;
        // Stores the asset attached to each auction.
pub AuctionItems get(fn get_auction_item): map hasher(twox_64_concat) T::AuctionId => Option<AuctionItem<T::AccountId, T::BlockNumber, T::Balance, TokenIdOf<T>, ClassIdOf<T>>>;
/// Track the next auction ID.
pub AuctionsIndex get(fn auctions_index): T::AuctionId;
/// Index auctions by end time.
pub AuctionEndTime get(fn auction_end_time): double_map hasher(twox_64_concat) T::BlockNumber, hasher(twox_64_concat) T::AuctionId => Option<()>;
}
}
decl_event!(
pub enum Event<T> where
<T as frame_system::Config>::AccountId,
<T as pallet_balances::Config>::Balance,
// AssetId = AssetId,
<T as Config>::AuctionId,
{
/// A bid is placed. [auction_id, bidder, bidding_amount]
Bid(AuctionId, AccountId, Balance),
        NewAuctionItem(AuctionId, AccountId, Balance, Balance),
AuctionFinalized(AuctionId, AccountId, Balance),
}
);
decl_module! {
pub struct Module<T: Config> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
/// The extended time for the auction to end after each successful bid
const AuctionTimeToClose: T::BlockNumber = T::AuctionTimeToClose::get();
#[weight = 10_000]
fn bid(origin, id: T::AuctionId, value: T::Balance) {
let from = ensure_signed(origin)?;
<Auctions<T>>::try_mutate_exists(id, |auction| -> DispatchResult {
let mut auction = auction.as_mut().ok_or(Error::<T>::AuctionNotExist)?;
let block_number = <frame_system::Module<T>>::block_number();
// make sure auction is started
ensure!(block_number >= auction.start, Error::<T>::AuctionNotStarted);
                // Only enforce expiry when the auction actually has an end block;
                // this avoids panicking on open-ended auctions.
                if let Some(auction_end) = auction.end {
                    ensure!(block_number < auction_end, Error::<T>::AuctionIsExpired);
                }
if let Some(ref current_bid) = auction.bid {
ensure!(value > current_bid.1, Error::<T>::InvalidBidPrice);
} else {
ensure!(!value.is_zero(), Error::<T>::InvalidBidPrice);
}
let bid_result = T::Handler::on_new_bid(
block_number,
id,
(from.clone(), value),
auction.bid.clone(),
);
ensure!(bid_result.accept_bid, Error::<T>::BidNotAccepted);
ensure!(<pallet_balances::Module<T>>::free_balance(&from) >= value, "You don't have enough free balance for this bid");
Self::auction_bid_handler(block_number, id, (from.clone(), value), auction.bid.clone())?;
auction.bid = Some((from.clone(), value));
Self::deposit_event(RawEvent::Bid(id, from, value));
Ok(())
})?;
}
#[weight = 10_000]
fn create_auction(origin, asset: (ClassIdOf<T>, TokenIdOf<T>), value: T::Balance) {
let from = ensure_signed(origin)?;
//Check ownership
let class_info = NftModule::<T>::classes(asset.0).ok_or(Error::<T>::NoPermissionToCreateAuction)?;
ensure!(from == class_info.owner, Error::<T>::NoPermissionToCreateAuction);
let class_info_data = class_info.data;
ensure!(class_info_data.token_type.is_transferrable(), Error::<T>::NoPermissionToCreateAuction);
let start_time = <system::Module<T>>::block_number();
            let end_time: T::BlockNumber = start_time + T::AuctionTimeToClose::get(); // close after the default auction duration (~7 days' worth of blocks)
let auction_id = Self::new_auction(from.clone(), value, start_time, Some(end_time))?;
            let new_auction_item = AuctionItem {
                asset_id: asset.1,
                class_id: asset.0,
                recipient: from.clone(),
                initial_amount: value,
                amount: value,
                start_time,
                end_time,
            };
<AuctionItems<T>>::insert(
auction_id,
new_auction_item
);
            Self::deposit_event(RawEvent::NewAuctionItem(auction_id, from, value, value));
}
/// dummy `on_initialize` to return the weight used in `on_finalize`.
// fn on_initialize(now: T::BlockNumber) -> Weight {
// T::WeightInfo::on_finalize(<AuctionEndTime<T>>::iter_prefix(&now).count() as u32)
// }
fn on_finalize(now: T::BlockNumber) {
for (auction_id, _) in <AuctionEndTime<T>>::drain_prefix(&now) {
if let Some(auction) = <Auctions<T>>::get(&auction_id) {
if let Some(auction_item) = <AuctionItems<T>>::get(&auction_id){
Self::remove_auction(auction_id.clone());
//Transfer balance from high bidder to asset owner
if let Some(current_bid) = auction.bid{
let (high_bidder, high_bid_price): (T::AccountId, T::Balance) = current_bid;
<pallet_balances::Module<T>>::unreserve(&high_bidder, high_bid_price);
let currency_transfer = <pallet_balances::Module<T> as Currency<_>>::transfer(&high_bidder, &auction_item.recipient , high_bid_price, ExistenceRequirement::KeepAlive);
match currency_transfer {
Err(_e) => continue,
Ok(_v) => {
//Transfer asset from asset owner to high bidder
let asset_transfer = NftModule::<T>::transfer(&auction_item.recipient, &high_bidder, (auction_item.class_id, auction_item.asset_id));
match asset_transfer {
Err(_) => continue,
Ok(_) => {
                                            Self::deposit_event(RawEvent::AuctionFinalized(auction_id, high_bidder, high_bid_price));
},
}
},
}
}
}
}
}
}
}
}
decl_error! {
/// Error for auction module.
pub enum Error for Module<T: Config> {
AuctionNotExist,
AuctionNotStarted,
AuctionIsExpired,
BidNotAccepted,
InvalidBidPrice,
NoAvailableAuctionId,
NoPermissionToCreateAuction,
}
}
impl<T: Config> Module<T> {
fn update_auction(
id: T::AuctionId,
info: AuctionInfo<T::AccountId, T::Balance, T::BlockNumber>,
) -> DispatchResult {
let auction = <Auctions<T>>::get(id).ok_or(Error::<T>::AuctionNotExist)?;
if let Some(old_end) = auction.end {
<AuctionEndTime<T>>::remove(&old_end, id);
}
if let Some(new_end) = info.end {
<AuctionEndTime<T>>::insert(&new_end, id, ());
}
<Auctions<T>>::insert(id, info);
Ok(())
}
fn new_auction(
_recipient: T::AccountId,
_initial_amount: T::Balance,
start: T::BlockNumber,
end: Option<T::BlockNumber>,
) -> Result<T::AuctionId, DispatchError> {
let auction: AuctionInfo<T::AccountId, T::Balance, T::BlockNumber> = AuctionInfo {
bid: None,
start,
end,
};
let auction_id: T::AuctionId =
<AuctionsIndex<T>>::try_mutate(|n| -> Result<T::AuctionId, DispatchError> {
let id = *n;
ensure!(
id != T::AuctionId::max_value(),
Error::<T>::NoAvailableAuctionId
);
*n += One::one();
Ok(id)
})?;
<Auctions<T>>::insert(auction_id, auction);
if let Some(end_block) = end {
<AuctionEndTime<T>>::insert(&end_block, auction_id, ());
}
Ok(auction_id)
}
fn remove_auction(id: T::AuctionId) {
if let Some(auction) = <Auctions<T>>::get(&id) {
if let Some(end_block) = auction.end {
<AuctionEndTime<T>>::remove(end_block, id);
<Auctions<T>>::remove(&id)
}
}
}
/// increment `new_bidder` reference and decrement `last_bidder` reference
/// if any
fn swap_bidders(new_bidder: &T::AccountId, last_bidder: Option<&T::AccountId>) {
system::Module::<T>::inc_consumers(new_bidder);
if let Some(who) = last_bidder {
system::Module::<T>::dec_consumers(who);
}
}
fn auction_bid_handler(
_now: T::BlockNumber,
id: T::AuctionId,
new_bid: (T::AccountId, T::Balance),
last_bid: Option<(T::AccountId, T::Balance)>,
) -> DispatchResult {
let (new_bidder, new_bid_price) = new_bid;
ensure!(!new_bid_price.is_zero(), Error::<T>::InvalidBidPrice);
<AuctionItems<T>>::try_mutate_exists(id, |auction_item| -> DispatchResult {
let mut auction_item = auction_item.as_mut().ok_or("Auction is not exists")?;
let last_bid_price = last_bid.clone().map_or(Zero::zero(), |(_, price)| price); //get last bid price
let last_bidder = last_bid.as_ref().map(|(who, _)| who);
if let Some(last_bidder) = last_bidder {
//unlock reserve amount
if !last_bid_price.is_zero() {
//Unreserve balance of last bidder
<pallet_balances::Module<T>>::unreserve(&last_bidder, last_bid_price);
}
}
//Lock fund of new bidder
//Reserve balance
<pallet_balances::Module<T>>::reserve(&new_bidder, new_bid_price)?;
auction_item.recipient = new_bidder.clone();
auction_item.amount = new_bid_price.clone();
Ok(())
})
}
}
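// Lifecycle note (added for exposition): `create_auction` opens an auction that
// ends `AuctionTimeToClose` blocks later; each accepted `bid` reserves the new
// bidder's balance and unreserves the previous bidder's. When the end block is
// reached, `on_finalize` drains `AuctionEndTime`, unreserves the winning bid,
// settles the funds and the NFT between the highest bidder and
// `auction_item.recipient`, and emits `AuctionFinalized`.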
// impl<T: Config> Auction<T::AccountId, T::BlockNumber> for Module<T> {
// type AuctionId = T::AuctionId;
// type Balance = T::Balance;
// }
impl<T: Config> AuctionHandler<T::AccountId, T::Balance, T::BlockNumber, T::AuctionId>
for Module<T>
{
fn on_new_bid(
_now: T::BlockNumber,
_id: T::AuctionId,
_new_bid: (T::AccountId, T::Balance),
_last_bid: Option<(T::AccountId, T::Balance)>,
) -> OnNewBidResult<T::BlockNumber> {
OnNewBidResult {
accept_bid: true,
auction_end_change: Change::NoChange,
}
}
fn on_auction_ended(_id: T::AuctionId, _winner: Option<(T::AccountId, T::Balance)>) {}
}
| 37.477573 | 199 | 0.577795 |
ac6d67109967c1fd4461f6ff88d6e8e508274f2b | 78,740 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
#[serde(skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Operation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub display: Option<operation::Display>,
#[serde(skip_serializing_if = "Option::is_none")]
pub origin: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<OperationProperties>,
}
pub mod operation {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Display {
#[serde(skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationProperties {
#[serde(rename = "serviceSpecification", skip_serializing_if = "Option::is_none")]
pub service_specification: Option<ServiceSpecification>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceSpecification {
#[serde(rename = "metricSpecifications", skip_serializing_if = "Vec::is_empty")]
pub metric_specifications: Vec<MetricSpecification>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MetricSpecification {
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "displayName", skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(rename = "displayDescription", skip_serializing_if = "Option::is_none")]
pub display_description: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub unit: Option<String>,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub dimensions: Vec<Dimension>,
#[serde(rename = "aggregationType", skip_serializing_if = "Option::is_none")]
pub aggregation_type: Option<String>,
#[serde(rename = "fillGapWithZero", skip_serializing_if = "Option::is_none")]
pub fill_gap_with_zero: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub category: Option<String>,
#[serde(rename = "resourceIdDimensionNameOverride", skip_serializing_if = "Option::is_none")]
pub resource_id_dimension_name_override: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Dimension {
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "displayName", skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountCheckNameAvailabilityParameters {
pub name: String,
#[serde(rename = "type")]
pub type_: storage_account_check_name_availability_parameters::Type,
}
pub mod storage_account_check_name_availability_parameters {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
#[serde(rename = "Microsoft.Storage/storageAccounts")]
MicrosoftStorageStorageAccounts,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SkuCapability {
#[serde(skip_serializing)]
pub name: Option<String>,
#[serde(skip_serializing)]
pub value: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Restriction {
#[serde(rename = "type", skip_serializing)]
pub type_: Option<String>,
#[serde(skip_serializing)]
pub values: Vec<String>,
#[serde(rename = "reasonCode", skip_serializing_if = "Option::is_none")]
pub reason_code: Option<restriction::ReasonCode>,
}
pub mod restriction {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ReasonCode {
QuotaId,
NotAvailableForSubscription,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageSkuListResult {
#[serde(skip_serializing)]
pub value: Vec<SkuInformation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckNameAvailabilityResult {
#[serde(rename = "nameAvailable", skip_serializing)]
pub name_available: Option<bool>,
#[serde(skip_serializing)]
pub reason: Option<check_name_availability_result::Reason>,
#[serde(skip_serializing)]
pub message: Option<String>,
}
pub mod check_name_availability_result {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Reason {
AccountNameInvalid,
AlreadyExists,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SkuInformation {
pub name: SkuName,
#[serde(skip_serializing)]
pub tier: Option<Tier>,
#[serde(rename = "resourceType", skip_serializing)]
pub resource_type: Option<String>,
#[serde(skip_serializing)]
pub kind: Option<sku_information::Kind>,
#[serde(skip_serializing)]
pub locations: Vec<String>,
#[serde(skip_serializing)]
pub capabilities: Vec<SkuCapability>,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub restrictions: Vec<Restriction>,
}
pub mod sku_information {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Kind {
Storage,
StorageV2,
BlobStorage,
FileStorage,
BlockBlobStorage,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomDomain {
pub name: String,
#[serde(rename = "useSubDomainName", skip_serializing_if = "Option::is_none")]
pub use_sub_domain_name: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SasPolicy {
#[serde(rename = "sasExpirationPeriod")]
pub sas_expiration_period: String,
#[serde(rename = "expirationAction")]
pub expiration_action: sas_policy::ExpirationAction,
}
pub mod sas_policy {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ExpirationAction {
Log,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyPolicy {
#[serde(rename = "keyExpirationPeriodInDays")]
pub key_expiration_period_in_days: i32,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionService {
#[serde(skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(rename = "lastEnabledTime", skip_serializing)]
pub last_enabled_time: Option<String>,
#[serde(rename = "keyType", skip_serializing_if = "Option::is_none")]
pub key_type: Option<encryption_service::KeyType>,
}
pub mod encryption_service {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum KeyType {
Service,
Account,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionServices {
#[serde(skip_serializing_if = "Option::is_none")]
pub blob: Option<EncryptionService>,
#[serde(skip_serializing_if = "Option::is_none")]
pub file: Option<EncryptionService>,
#[serde(skip_serializing_if = "Option::is_none")]
pub table: Option<EncryptionService>,
#[serde(skip_serializing_if = "Option::is_none")]
pub queue: Option<EncryptionService>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyVaultProperties {
#[serde(skip_serializing_if = "Option::is_none")]
pub keyname: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keyversion: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keyvaulturi: Option<String>,
#[serde(rename = "currentVersionedKeyIdentifier", skip_serializing)]
pub current_versioned_key_identifier: Option<String>,
#[serde(rename = "lastKeyRotationTimestamp", skip_serializing)]
pub last_key_rotation_timestamp: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Encryption {
#[serde(skip_serializing_if = "Option::is_none")]
pub services: Option<EncryptionServices>,
#[serde(rename = "keySource")]
pub key_source: encryption::KeySource,
#[serde(rename = "requireInfrastructureEncryption", skip_serializing_if = "Option::is_none")]
pub require_infrastructure_encryption: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keyvaultproperties: Option<KeyVaultProperties>,
#[serde(skip_serializing_if = "Option::is_none")]
pub identity: Option<EncryptionIdentity>,
}
pub mod encryption {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum KeySource {
#[serde(rename = "Microsoft.Storage")]
MicrosoftStorage,
#[serde(rename = "Microsoft.Keyvault")]
MicrosoftKeyvault,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionIdentity {
#[serde(rename = "userAssignedIdentity", skip_serializing_if = "Option::is_none")]
pub user_assigned_identity: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAssignedIdentity {
#[serde(rename = "principalId", skip_serializing)]
pub principal_id: Option<String>,
#[serde(rename = "clientId", skip_serializing)]
pub client_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceAccessRule {
#[serde(rename = "tenantId", skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "resourceId", skip_serializing_if = "Option::is_none")]
pub resource_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkRule {
pub id: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub action: Option<virtual_network_rule::Action>,
#[serde(skip_serializing_if = "Option::is_none")]
pub state: Option<virtual_network_rule::State>,
}
pub mod virtual_network_rule {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Action {
Allow,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum State {
#[serde(rename = "provisioning")]
Provisioning,
#[serde(rename = "deprovisioning")]
Deprovisioning,
#[serde(rename = "succeeded")]
Succeeded,
#[serde(rename = "failed")]
Failed,
#[serde(rename = "networkSourceDeleted")]
NetworkSourceDeleted,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IpRule {
pub value: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub action: Option<ip_rule::Action>,
}
pub mod ip_rule {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Action {
Allow,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkRuleSet {
#[serde(skip_serializing_if = "Option::is_none")]
pub bypass: Option<network_rule_set::Bypass>,
#[serde(rename = "resourceAccessRules", skip_serializing_if = "Vec::is_empty")]
pub resource_access_rules: Vec<ResourceAccessRule>,
#[serde(rename = "virtualNetworkRules", skip_serializing_if = "Vec::is_empty")]
pub virtual_network_rules: Vec<VirtualNetworkRule>,
#[serde(rename = "ipRules", skip_serializing_if = "Vec::is_empty")]
pub ip_rules: Vec<IpRule>,
#[serde(rename = "defaultAction")]
pub default_action: network_rule_set::DefaultAction,
}
pub mod network_rule_set {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Bypass {
None,
Logging,
Metrics,
AzureServices,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DefaultAction {
Allow,
Deny,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureFilesIdentityBasedAuthentication {
#[serde(rename = "directoryServiceOptions")]
pub directory_service_options: azure_files_identity_based_authentication::DirectoryServiceOptions,
#[serde(rename = "activeDirectoryProperties", skip_serializing_if = "Option::is_none")]
pub active_directory_properties: Option<ActiveDirectoryProperties>,
}
pub mod azure_files_identity_based_authentication {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DirectoryServiceOptions {
None,
#[serde(rename = "AADDS")]
Aadds,
#[serde(rename = "AD")]
Ad,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActiveDirectoryProperties {
#[serde(rename = "domainName")]
pub domain_name: String,
#[serde(rename = "netBiosDomainName")]
pub net_bios_domain_name: String,
#[serde(rename = "forestName")]
pub forest_name: String,
#[serde(rename = "domainGuid")]
pub domain_guid: String,
#[serde(rename = "domainSid")]
pub domain_sid: String,
#[serde(rename = "azureStorageSid")]
pub azure_storage_sid: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountPropertiesCreateParameters {
#[serde(rename = "sasPolicy", skip_serializing_if = "Option::is_none")]
pub sas_policy: Option<SasPolicy>,
#[serde(rename = "keyPolicy", skip_serializing_if = "Option::is_none")]
pub key_policy: Option<KeyPolicy>,
#[serde(rename = "customDomain", skip_serializing_if = "Option::is_none")]
pub custom_domain: Option<CustomDomain>,
#[serde(skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
#[serde(rename = "networkAcls", skip_serializing_if = "Option::is_none")]
pub network_acls: Option<NetworkRuleSet>,
#[serde(rename = "accessTier", skip_serializing_if = "Option::is_none")]
pub access_tier: Option<storage_account_properties_create_parameters::AccessTier>,
#[serde(rename = "azureFilesIdentityBasedAuthentication", skip_serializing_if = "Option::is_none")]
pub azure_files_identity_based_authentication: Option<AzureFilesIdentityBasedAuthentication>,
#[serde(rename = "supportsHttpsTrafficOnly", skip_serializing_if = "Option::is_none")]
pub supports_https_traffic_only: Option<bool>,
#[serde(rename = "isHnsEnabled", skip_serializing_if = "Option::is_none")]
pub is_hns_enabled: Option<bool>,
#[serde(rename = "largeFileSharesState", skip_serializing_if = "Option::is_none")]
pub large_file_shares_state: Option<storage_account_properties_create_parameters::LargeFileSharesState>,
#[serde(rename = "routingPreference", skip_serializing_if = "Option::is_none")]
pub routing_preference: Option<RoutingPreference>,
#[serde(rename = "allowBlobPublicAccess", skip_serializing_if = "Option::is_none")]
pub allow_blob_public_access: Option<bool>,
#[serde(rename = "minimumTlsVersion", skip_serializing_if = "Option::is_none")]
pub minimum_tls_version: Option<storage_account_properties_create_parameters::MinimumTlsVersion>,
#[serde(rename = "allowSharedKeyAccess", skip_serializing_if = "Option::is_none")]
pub allow_shared_key_access: Option<bool>,
#[serde(rename = "isNfsV3Enabled", skip_serializing_if = "Option::is_none")]
pub is_nfs_v3_enabled: Option<bool>,
}
pub mod storage_account_properties_create_parameters {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AccessTier {
Hot,
Cool,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LargeFileSharesState {
Disabled,
Enabled,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum MinimumTlsVersion {
#[serde(rename = "TLS1_0")]
Tls10,
#[serde(rename = "TLS1_1")]
Tls11,
#[serde(rename = "TLS1_2")]
Tls12,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Identity {
#[serde(rename = "principalId", skip_serializing)]
pub principal_id: Option<String>,
#[serde(rename = "tenantId", skip_serializing)]
pub tenant_id: Option<String>,
#[serde(rename = "type")]
pub type_: identity::Type,
#[serde(rename = "userAssignedIdentities", skip_serializing_if = "Option::is_none")]
pub user_assigned_identities: Option<serde_json::Value>,
}
pub mod identity {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
None,
SystemAssigned,
UserAssigned,
#[serde(rename = "SystemAssigned,UserAssigned")]
SystemAssignedUserAssigned,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ExtendedLocationType {
EdgeZone,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExtendedLocation {
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", skip_serializing_if = "Option::is_none")]
pub type_: Option<ExtendedLocationType>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountCreateParameters {
pub sku: Sku,
pub kind: storage_account_create_parameters::Kind,
pub location: String,
#[serde(rename = "extendedLocation", skip_serializing_if = "Option::is_none")]
pub extended_location: Option<ExtendedLocation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(skip_serializing_if = "Option::is_none")]
pub identity: Option<Identity>,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<StorageAccountPropertiesCreateParameters>,
}
pub mod storage_account_create_parameters {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Kind {
Storage,
StorageV2,
BlobStorage,
FileStorage,
BlockBlobStorage,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Endpoints {
#[serde(skip_serializing)]
pub blob: Option<String>,
#[serde(skip_serializing)]
pub queue: Option<String>,
#[serde(skip_serializing)]
pub table: Option<String>,
#[serde(skip_serializing)]
pub file: Option<String>,
#[serde(skip_serializing)]
pub web: Option<String>,
#[serde(skip_serializing)]
pub dfs: Option<String>,
#[serde(rename = "microsoftEndpoints", skip_serializing_if = "Option::is_none")]
pub microsoft_endpoints: Option<StorageAccountMicrosoftEndpoints>,
#[serde(rename = "internetEndpoints", skip_serializing_if = "Option::is_none")]
pub internet_endpoints: Option<StorageAccountInternetEndpoints>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountMicrosoftEndpoints {
#[serde(skip_serializing)]
pub blob: Option<String>,
#[serde(skip_serializing)]
pub queue: Option<String>,
#[serde(skip_serializing)]
pub table: Option<String>,
#[serde(skip_serializing)]
pub file: Option<String>,
#[serde(skip_serializing)]
pub web: Option<String>,
#[serde(skip_serializing)]
pub dfs: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountInternetEndpoints {
#[serde(skip_serializing)]
pub blob: Option<String>,
#[serde(skip_serializing)]
pub file: Option<String>,
#[serde(skip_serializing)]
pub web: Option<String>,
#[serde(skip_serializing)]
pub dfs: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GeoReplicationStats {
#[serde(skip_serializing)]
pub status: Option<geo_replication_stats::Status>,
#[serde(rename = "lastSyncTime", skip_serializing)]
pub last_sync_time: Option<String>,
#[serde(rename = "canFailover", skip_serializing)]
pub can_failover: Option<bool>,
}
pub mod geo_replication_stats {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Live,
Bootstrap,
Unavailable,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoutingPreference {
#[serde(rename = "routingChoice", skip_serializing_if = "Option::is_none")]
pub routing_choice: Option<routing_preference::RoutingChoice>,
#[serde(rename = "publishMicrosoftEndpoints", skip_serializing_if = "Option::is_none")]
pub publish_microsoft_endpoints: Option<bool>,
#[serde(rename = "publishInternetEndpoints", skip_serializing_if = "Option::is_none")]
pub publish_internet_endpoints: Option<bool>,
}
pub mod routing_preference {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum RoutingChoice {
MicrosoftRouting,
InternetRouting,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobRestoreStatus {
#[serde(skip_serializing)]
pub status: Option<blob_restore_status::Status>,
#[serde(rename = "failureReason", skip_serializing)]
pub failure_reason: Option<String>,
#[serde(rename = "restoreId", skip_serializing)]
pub restore_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub parameters: Option<BlobRestoreParameters>,
}
pub mod blob_restore_status {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
InProgress,
Complete,
Failed,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeletedAccountProperties {
#[serde(rename = "storageAccountResourceId", skip_serializing)]
pub storage_account_resource_id: Option<String>,
#[serde(skip_serializing)]
pub location: Option<String>,
#[serde(rename = "restoreReference", skip_serializing)]
pub restore_reference: Option<String>,
#[serde(rename = "creationTime", skip_serializing)]
pub creation_time: Option<String>,
#[serde(rename = "deletionTime", skip_serializing)]
pub deletion_time: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountProperties {
#[serde(rename = "provisioningState", skip_serializing)]
pub provisioning_state: Option<storage_account_properties::ProvisioningState>,
#[serde(rename = "primaryEndpoints", skip_serializing_if = "Option::is_none")]
pub primary_endpoints: Option<Endpoints>,
#[serde(rename = "primaryLocation", skip_serializing)]
pub primary_location: Option<String>,
#[serde(rename = "statusOfPrimary", skip_serializing)]
pub status_of_primary: Option<storage_account_properties::StatusOfPrimary>,
#[serde(rename = "lastGeoFailoverTime", skip_serializing)]
pub last_geo_failover_time: Option<String>,
#[serde(rename = "secondaryLocation", skip_serializing)]
pub secondary_location: Option<String>,
#[serde(rename = "statusOfSecondary", skip_serializing)]
pub status_of_secondary: Option<storage_account_properties::StatusOfSecondary>,
#[serde(rename = "creationTime", skip_serializing)]
pub creation_time: Option<String>,
#[serde(rename = "customDomain", skip_serializing_if = "Option::is_none")]
pub custom_domain: Option<CustomDomain>,
#[serde(rename = "sasPolicy", skip_serializing_if = "Option::is_none")]
pub sas_policy: Option<SasPolicy>,
#[serde(rename = "keyPolicy", skip_serializing_if = "Option::is_none")]
pub key_policy: Option<KeyPolicy>,
#[serde(rename = "keyCreationTime", skip_serializing)]
pub key_creation_time: Option<serde_json::Value>,
#[serde(rename = "secondaryEndpoints", skip_serializing_if = "Option::is_none")]
pub secondary_endpoints: Option<Endpoints>,
#[serde(skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
#[serde(rename = "accessTier", skip_serializing)]
pub access_tier: Option<storage_account_properties::AccessTier>,
#[serde(rename = "azureFilesIdentityBasedAuthentication", skip_serializing_if = "Option::is_none")]
pub azure_files_identity_based_authentication: Option<AzureFilesIdentityBasedAuthentication>,
#[serde(rename = "supportsHttpsTrafficOnly", skip_serializing_if = "Option::is_none")]
pub supports_https_traffic_only: Option<bool>,
#[serde(rename = "networkAcls", skip_serializing_if = "Option::is_none")]
pub network_acls: Option<NetworkRuleSet>,
#[serde(rename = "isHnsEnabled", skip_serializing_if = "Option::is_none")]
pub is_hns_enabled: Option<bool>,
#[serde(rename = "geoReplicationStats", skip_serializing_if = "Option::is_none")]
pub geo_replication_stats: Option<GeoReplicationStats>,
#[serde(rename = "failoverInProgress", skip_serializing)]
pub failover_in_progress: Option<bool>,
#[serde(rename = "largeFileSharesState", skip_serializing_if = "Option::is_none")]
pub large_file_shares_state: Option<storage_account_properties::LargeFileSharesState>,
#[serde(rename = "privateEndpointConnections", skip_serializing)]
pub private_endpoint_connections: Vec<PrivateEndpointConnection>,
#[serde(rename = "routingPreference", skip_serializing_if = "Option::is_none")]
pub routing_preference: Option<RoutingPreference>,
#[serde(rename = "blobRestoreStatus", skip_serializing_if = "Option::is_none")]
pub blob_restore_status: Option<BlobRestoreStatus>,
#[serde(rename = "allowBlobPublicAccess", skip_serializing_if = "Option::is_none")]
pub allow_blob_public_access: Option<bool>,
#[serde(rename = "minimumTlsVersion", skip_serializing_if = "Option::is_none")]
pub minimum_tls_version: Option<storage_account_properties::MinimumTlsVersion>,
#[serde(rename = "allowSharedKeyAccess", skip_serializing_if = "Option::is_none")]
pub allow_shared_key_access: Option<bool>,
#[serde(rename = "isNfsV3Enabled", skip_serializing_if = "Option::is_none")]
pub is_nfs_v3_enabled: Option<bool>,
}
pub mod storage_account_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
Creating,
#[serde(rename = "ResolvingDNS")]
ResolvingDns,
Succeeded,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum StatusOfPrimary {
#[serde(rename = "available")]
Available,
#[serde(rename = "unavailable")]
Unavailable,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum StatusOfSecondary {
#[serde(rename = "available")]
Available,
#[serde(rename = "unavailable")]
Unavailable,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AccessTier {
Hot,
Cool,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LargeFileSharesState {
Disabled,
Enabled,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum MinimumTlsVersion {
#[serde(rename = "TLS1_0")]
Tls10,
#[serde(rename = "TLS1_1")]
Tls11,
#[serde(rename = "TLS1_2")]
Tls12,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeletedAccount {
#[serde(flatten)]
pub proxy_resource: ProxyResource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<DeletedAccountProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccount {
#[serde(flatten)]
pub tracked_resource: TrackedResource,
#[serde(skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
#[serde(skip_serializing)]
pub kind: Option<storage_account::Kind>,
#[serde(skip_serializing_if = "Option::is_none")]
pub identity: Option<Identity>,
#[serde(rename = "extendedLocation", skip_serializing_if = "Option::is_none")]
pub extended_location: Option<ExtendedLocation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<StorageAccountProperties>,
}
pub mod storage_account {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Kind {
Storage,
StorageV2,
BlobStorage,
FileStorage,
BlockBlobStorage,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountKey {
#[serde(rename = "keyName", skip_serializing)]
pub key_name: Option<String>,
#[serde(skip_serializing)]
pub value: Option<String>,
#[serde(skip_serializing)]
pub permissions: Option<storage_account_key::Permissions>,
#[serde(rename = "creationTime", skip_serializing)]
pub creation_time: Option<String>,
}
pub mod storage_account_key {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Permissions {
Read,
Full,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountListResult {
#[serde(skip_serializing)]
pub value: Vec<StorageAccount>,
#[serde(rename = "nextLink", skip_serializing)]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeletedAccountListResult {
#[serde(skip_serializing)]
pub value: Vec<DeletedAccount>,
#[serde(rename = "nextLink", skip_serializing)]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountListKeysResult {
#[serde(skip_serializing)]
pub keys: Vec<StorageAccountKey>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountRegenerateKeyParameters {
#[serde(rename = "keyName")]
pub key_name: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountPropertiesUpdateParameters {
#[serde(rename = "customDomain", skip_serializing_if = "Option::is_none")]
pub custom_domain: Option<CustomDomain>,
#[serde(skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
#[serde(rename = "sasPolicy", skip_serializing_if = "Option::is_none")]
pub sas_policy: Option<SasPolicy>,
#[serde(rename = "keyPolicy", skip_serializing_if = "Option::is_none")]
pub key_policy: Option<KeyPolicy>,
#[serde(rename = "accessTier", skip_serializing_if = "Option::is_none")]
pub access_tier: Option<storage_account_properties_update_parameters::AccessTier>,
#[serde(rename = "azureFilesIdentityBasedAuthentication", skip_serializing_if = "Option::is_none")]
pub azure_files_identity_based_authentication: Option<AzureFilesIdentityBasedAuthentication>,
#[serde(rename = "supportsHttpsTrafficOnly", skip_serializing_if = "Option::is_none")]
pub supports_https_traffic_only: Option<bool>,
#[serde(rename = "networkAcls", skip_serializing_if = "Option::is_none")]
pub network_acls: Option<NetworkRuleSet>,
#[serde(rename = "largeFileSharesState", skip_serializing_if = "Option::is_none")]
pub large_file_shares_state: Option<storage_account_properties_update_parameters::LargeFileSharesState>,
#[serde(rename = "routingPreference", skip_serializing_if = "Option::is_none")]
pub routing_preference: Option<RoutingPreference>,
#[serde(rename = "allowBlobPublicAccess", skip_serializing_if = "Option::is_none")]
pub allow_blob_public_access: Option<bool>,
#[serde(rename = "minimumTlsVersion", skip_serializing_if = "Option::is_none")]
pub minimum_tls_version: Option<storage_account_properties_update_parameters::MinimumTlsVersion>,
#[serde(rename = "allowSharedKeyAccess", skip_serializing_if = "Option::is_none")]
pub allow_shared_key_access: Option<bool>,
}
pub mod storage_account_properties_update_parameters {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AccessTier {
Hot,
Cool,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LargeFileSharesState {
Disabled,
Enabled,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum MinimumTlsVersion {
#[serde(rename = "TLS1_0")]
Tls10,
#[serde(rename = "TLS1_1")]
Tls11,
#[serde(rename = "TLS1_2")]
Tls12,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageAccountUpdateParameters {
#[serde(skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(skip_serializing_if = "Option::is_none")]
pub identity: Option<Identity>,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<StorageAccountPropertiesUpdateParameters>,
#[serde(skip_serializing_if = "Option::is_none")]
pub kind: Option<storage_account_update_parameters::Kind>,
}
pub mod storage_account_update_parameters {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Kind {
Storage,
StorageV2,
BlobStorage,
FileStorage,
BlockBlobStorage,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobRestoreParameters {
#[serde(rename = "timeToRestore")]
pub time_to_restore: String,
#[serde(rename = "blobRanges")]
pub blob_ranges: Vec<BlobRestoreRange>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobRestoreRange {
#[serde(rename = "startRange")]
pub start_range: String,
#[serde(rename = "endRange")]
pub end_range: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UsageName {
#[serde(skip_serializing)]
pub value: Option<String>,
#[serde(rename = "localizedValue", skip_serializing)]
pub localized_value: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Usage {
#[serde(skip_serializing)]
pub unit: Option<usage::Unit>,
#[serde(rename = "currentValue", skip_serializing)]
pub current_value: Option<i32>,
#[serde(skip_serializing)]
pub limit: Option<i32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<UsageName>,
}
pub mod usage {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Unit {
Count,
Bytes,
Seconds,
Percent,
CountsPerSecond,
BytesPerSecond,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UsageListResult {
#[serde(skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Usage>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountSasParameters {
#[serde(rename = "signedServices")]
pub signed_services: account_sas_parameters::SignedServices,
#[serde(rename = "signedResourceTypes")]
pub signed_resource_types: account_sas_parameters::SignedResourceTypes,
#[serde(rename = "signedPermission")]
pub signed_permission: account_sas_parameters::SignedPermission,
#[serde(rename = "signedIp", skip_serializing_if = "Option::is_none")]
pub signed_ip: Option<String>,
#[serde(rename = "signedProtocol", skip_serializing_if = "Option::is_none")]
pub signed_protocol: Option<account_sas_parameters::SignedProtocol>,
#[serde(rename = "signedStart", skip_serializing_if = "Option::is_none")]
pub signed_start: Option<String>,
#[serde(rename = "signedExpiry")]
pub signed_expiry: String,
#[serde(rename = "keyToSign", skip_serializing_if = "Option::is_none")]
pub key_to_sign: Option<String>,
}
pub mod account_sas_parameters {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SignedServices {
#[serde(rename = "b")]
B,
#[serde(rename = "q")]
Q,
#[serde(rename = "t")]
T,
#[serde(rename = "f")]
F,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SignedResourceTypes {
#[serde(rename = "s")]
S,
#[serde(rename = "c")]
C,
#[serde(rename = "o")]
O,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SignedPermission {
#[serde(rename = "r")]
R,
#[serde(rename = "d")]
D,
#[serde(rename = "w")]
W,
#[serde(rename = "l")]
L,
#[serde(rename = "a")]
A,
#[serde(rename = "c")]
C,
#[serde(rename = "u")]
U,
#[serde(rename = "p")]
P,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SignedProtocol {
#[serde(rename = "https,http")]
HttpsHttp,
#[serde(rename = "https")]
Https,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListAccountSasResponse {
#[serde(rename = "accountSasToken", skip_serializing)]
pub account_sas_token: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceSasParameters {
#[serde(rename = "canonicalizedResource")]
pub canonicalized_resource: String,
#[serde(rename = "signedResource", skip_serializing_if = "Option::is_none")]
pub signed_resource: Option<service_sas_parameters::SignedResource>,
#[serde(rename = "signedPermission", skip_serializing_if = "Option::is_none")]
pub signed_permission: Option<service_sas_parameters::SignedPermission>,
#[serde(rename = "signedIp", skip_serializing_if = "Option::is_none")]
pub signed_ip: Option<String>,
#[serde(rename = "signedProtocol", skip_serializing_if = "Option::is_none")]
pub signed_protocol: Option<service_sas_parameters::SignedProtocol>,
#[serde(rename = "signedStart", skip_serializing_if = "Option::is_none")]
pub signed_start: Option<String>,
#[serde(rename = "signedExpiry", skip_serializing_if = "Option::is_none")]
pub signed_expiry: Option<String>,
#[serde(rename = "signedIdentifier", skip_serializing_if = "Option::is_none")]
pub signed_identifier: Option<String>,
#[serde(rename = "startPk", skip_serializing_if = "Option::is_none")]
pub start_pk: Option<String>,
#[serde(rename = "endPk", skip_serializing_if = "Option::is_none")]
pub end_pk: Option<String>,
#[serde(rename = "startRk", skip_serializing_if = "Option::is_none")]
pub start_rk: Option<String>,
#[serde(rename = "endRk", skip_serializing_if = "Option::is_none")]
pub end_rk: Option<String>,
#[serde(rename = "keyToSign", skip_serializing_if = "Option::is_none")]
pub key_to_sign: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub rscc: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub rscd: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub rsce: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub rscl: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub rsct: Option<String>,
}
pub mod service_sas_parameters {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SignedResource {
#[serde(rename = "b")]
B,
#[serde(rename = "c")]
C,
#[serde(rename = "f")]
F,
#[serde(rename = "s")]
S,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SignedPermission {
#[serde(rename = "r")]
R,
#[serde(rename = "d")]
D,
#[serde(rename = "w")]
W,
#[serde(rename = "l")]
L,
#[serde(rename = "a")]
A,
#[serde(rename = "c")]
C,
#[serde(rename = "u")]
U,
#[serde(rename = "p")]
P,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SignedProtocol {
#[serde(rename = "https,http")]
HttpsHttp,
#[serde(rename = "https")]
Https,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListServiceSasResponse {
#[serde(rename = "serviceSasToken", skip_serializing)]
pub service_sas_token: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagementPolicy {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<ManagementPolicyProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagementPolicyProperties {
#[serde(rename = "lastModifiedTime", skip_serializing)]
pub last_modified_time: Option<String>,
pub policy: ManagementPolicySchema,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagementPolicySchema {
pub rules: Vec<ManagementPolicyRule>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagementPolicyRule {
#[serde(skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
pub name: String,
#[serde(rename = "type")]
pub type_: management_policy_rule::Type,
pub definition: ManagementPolicyDefinition,
}
pub mod management_policy_rule {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Lifecycle,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagementPolicyDefinition {
pub actions: ManagementPolicyAction,
#[serde(skip_serializing_if = "Option::is_none")]
pub filters: Option<ManagementPolicyFilter>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagementPolicyFilter {
#[serde(rename = "prefixMatch", skip_serializing_if = "Vec::is_empty")]
pub prefix_match: Vec<String>,
#[serde(rename = "blobTypes")]
pub blob_types: Vec<String>,
#[serde(rename = "blobIndexMatch", skip_serializing_if = "Vec::is_empty")]
pub blob_index_match: Vec<TagFilter>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagFilter {
pub name: String,
pub op: String,
pub value: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagementPolicyAction {
#[serde(rename = "baseBlob", skip_serializing_if = "Option::is_none")]
pub base_blob: Option<ManagementPolicyBaseBlob>,
#[serde(skip_serializing_if = "Option::is_none")]
pub snapshot: Option<ManagementPolicySnapShot>,
#[serde(skip_serializing_if = "Option::is_none")]
pub version: Option<ManagementPolicyVersion>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagementPolicyBaseBlob {
#[serde(rename = "tierToCool", skip_serializing_if = "Option::is_none")]
pub tier_to_cool: Option<DateAfterModification>,
#[serde(rename = "tierToArchive", skip_serializing_if = "Option::is_none")]
pub tier_to_archive: Option<DateAfterModification>,
#[serde(skip_serializing_if = "Option::is_none")]
pub delete: Option<DateAfterModification>,
#[serde(rename = "enableAutoTierToHotFromCool", skip_serializing_if = "Option::is_none")]
pub enable_auto_tier_to_hot_from_cool: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagementPolicySnapShot {
#[serde(rename = "tierToCool", skip_serializing_if = "Option::is_none")]
pub tier_to_cool: Option<DateAfterCreation>,
#[serde(rename = "tierToArchive", skip_serializing_if = "Option::is_none")]
pub tier_to_archive: Option<DateAfterCreation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub delete: Option<DateAfterCreation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagementPolicyVersion {
#[serde(rename = "tierToCool", skip_serializing_if = "Option::is_none")]
pub tier_to_cool: Option<DateAfterCreation>,
#[serde(rename = "tierToArchive", skip_serializing_if = "Option::is_none")]
pub tier_to_archive: Option<DateAfterCreation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub delete: Option<DateAfterCreation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DateAfterModification {
#[serde(rename = "daysAfterModificationGreaterThan", skip_serializing_if = "Option::is_none")]
pub days_after_modification_greater_than: Option<f64>,
#[serde(rename = "daysAfterLastAccessTimeGreaterThan", skip_serializing_if = "Option::is_none")]
pub days_after_last_access_time_greater_than: Option<f64>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DateAfterCreation {
#[serde(rename = "daysAfterCreationGreaterThan")]
pub days_after_creation_greater_than: f64,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionScope {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<EncryptionScopeProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionScopeProperties {
#[serde(skip_serializing_if = "Option::is_none")]
pub source: Option<encryption_scope_properties::Source>,
#[serde(skip_serializing_if = "Option::is_none")]
pub state: Option<encryption_scope_properties::State>,
#[serde(rename = "creationTime", skip_serializing)]
pub creation_time: Option<String>,
#[serde(rename = "lastModifiedTime", skip_serializing)]
pub last_modified_time: Option<String>,
#[serde(rename = "keyVaultProperties", skip_serializing_if = "Option::is_none")]
pub key_vault_properties: Option<EncryptionScopeKeyVaultProperties>,
#[serde(rename = "requireInfrastructureEncryption", skip_serializing_if = "Option::is_none")]
pub require_infrastructure_encryption: Option<bool>,
}
pub mod encryption_scope_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Source {
#[serde(rename = "Microsoft.Storage")]
MicrosoftStorage,
#[serde(rename = "Microsoft.KeyVault")]
MicrosoftKeyVault,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum State {
Enabled,
Disabled,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionScopeKeyVaultProperties {
#[serde(rename = "keyUri", skip_serializing_if = "Option::is_none")]
pub key_uri: Option<String>,
#[serde(rename = "currentVersionedKeyIdentifier", skip_serializing)]
pub current_versioned_key_identifier: Option<String>,
#[serde(rename = "lastKeyRotationTimestamp", skip_serializing)]
pub last_key_rotation_timestamp: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionScopeListResult {
#[serde(skip_serializing)]
pub value: Vec<EncryptionScope>,
#[serde(rename = "nextLink", skip_serializing)]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ObjectReplicationPolicies {
#[serde(skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ObjectReplicationPolicy>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ObjectReplicationPolicy {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<ObjectReplicationPolicyProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ObjectReplicationPolicyProperties {
#[serde(rename = "policyId", skip_serializing)]
pub policy_id: Option<String>,
#[serde(rename = "enabledTime", skip_serializing)]
pub enabled_time: Option<String>,
#[serde(rename = "sourceAccount")]
pub source_account: String,
#[serde(rename = "destinationAccount")]
pub destination_account: String,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub rules: Vec<ObjectReplicationPolicyRule>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ObjectReplicationPolicyRule {
#[serde(rename = "ruleId", skip_serializing_if = "Option::is_none")]
pub rule_id: Option<String>,
#[serde(rename = "sourceContainer")]
pub source_container: String,
#[serde(rename = "destinationContainer")]
pub destination_container: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub filters: Option<ObjectReplicationPolicyFilter>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ObjectReplicationPolicyFilter {
#[serde(rename = "prefixMatch", skip_serializing_if = "Vec::is_empty")]
pub prefix_match: Vec<String>,
#[serde(rename = "minCreationTime", skip_serializing_if = "Option::is_none")]
pub min_creation_time: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListBlobInventoryPolicy {
#[serde(skip_serializing)]
pub value: Vec<BlobInventoryPolicy>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobInventoryPolicy {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<BlobInventoryPolicyProperties>,
#[serde(rename = "systemData", skip_serializing)]
pub system_data: Option<SystemData>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobInventoryPolicyProperties {
#[serde(rename = "lastModifiedTime", skip_serializing)]
pub last_modified_time: Option<String>,
pub policy: BlobInventoryPolicySchema,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobInventoryPolicySchema {
pub enabled: bool,
pub destination: String,
#[serde(rename = "type")]
pub type_: blob_inventory_policy_schema::Type,
pub rules: Vec<BlobInventoryPolicyRule>,
}
pub mod blob_inventory_policy_schema {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Inventory,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobInventoryPolicyRule {
pub enabled: bool,
pub name: String,
pub definition: BlobInventoryPolicyDefinition,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobInventoryPolicyDefinition {
pub filters: BlobInventoryPolicyFilter,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobInventoryPolicyFilter {
#[serde(rename = "prefixMatch", skip_serializing_if = "Vec::is_empty")]
pub prefix_match: Vec<String>,
#[serde(rename = "blobTypes")]
pub blob_types: Vec<String>,
#[serde(rename = "includeBlobVersions", skip_serializing_if = "Option::is_none")]
pub include_blob_versions: Option<bool>,
#[serde(rename = "includeSnapshots", skip_serializing_if = "Option::is_none")]
pub include_snapshots: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponseBody {
#[serde(skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorResponseBody>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContainerProperties {
#[serde(skip_serializing)]
pub version: Option<String>,
#[serde(skip_serializing)]
pub deleted: Option<bool>,
#[serde(rename = "deletedTime", skip_serializing)]
pub deleted_time: Option<String>,
#[serde(rename = "remainingRetentionDays", skip_serializing)]
pub remaining_retention_days: Option<i64>,
#[serde(rename = "defaultEncryptionScope", skip_serializing_if = "Option::is_none")]
pub default_encryption_scope: Option<String>,
#[serde(rename = "denyEncryptionScopeOverride", skip_serializing_if = "Option::is_none")]
pub deny_encryption_scope_override: Option<bool>,
#[serde(rename = "publicAccess", skip_serializing_if = "Option::is_none")]
pub public_access: Option<container_properties::PublicAccess>,
#[serde(rename = "lastModifiedTime", skip_serializing)]
pub last_modified_time: Option<String>,
#[serde(rename = "leaseStatus", skip_serializing)]
pub lease_status: Option<container_properties::LeaseStatus>,
#[serde(rename = "leaseState", skip_serializing)]
pub lease_state: Option<container_properties::LeaseState>,
#[serde(rename = "leaseDuration", skip_serializing)]
pub lease_duration: Option<container_properties::LeaseDuration>,
#[serde(skip_serializing_if = "Option::is_none")]
pub metadata: Option<serde_json::Value>,
#[serde(rename = "immutabilityPolicy", skip_serializing_if = "Option::is_none")]
pub immutability_policy: Option<ImmutabilityPolicyProperties>,
#[serde(rename = "legalHold", skip_serializing_if = "Option::is_none")]
pub legal_hold: Option<LegalHoldProperties>,
#[serde(rename = "hasLegalHold", skip_serializing)]
pub has_legal_hold: Option<bool>,
#[serde(rename = "hasImmutabilityPolicy", skip_serializing)]
pub has_immutability_policy: Option<bool>,
}
pub mod container_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PublicAccess {
Container,
Blob,
None,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LeaseStatus {
Locked,
Unlocked,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LeaseState {
Available,
Leased,
Expired,
Breaking,
Broken,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LeaseDuration {
Infinite,
Fixed,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobContainer {
#[serde(flatten)]
pub azure_entity_resource: AzureEntityResource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<ContainerProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImmutabilityPolicyProperty {
#[serde(rename = "immutabilityPeriodSinceCreationInDays", skip_serializing_if = "Option::is_none")]
pub immutability_period_since_creation_in_days: Option<i64>,
#[serde(skip_serializing)]
pub state: Option<immutability_policy_property::State>,
#[serde(rename = "allowProtectedAppendWrites", skip_serializing_if = "Option::is_none")]
pub allow_protected_append_writes: Option<bool>,
}
pub mod immutability_policy_property {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum State {
Locked,
Unlocked,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImmutabilityPolicyProperties {
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<ImmutabilityPolicyProperty>,
#[serde(skip_serializing)]
pub etag: Option<String>,
#[serde(rename = "updateHistory", skip_serializing)]
pub update_history: Vec<UpdateHistoryProperty>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImmutabilityPolicy {
#[serde(flatten)]
pub azure_entity_resource: AzureEntityResource,
pub properties: ImmutabilityPolicyProperty,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateHistoryProperty {
#[serde(skip_serializing)]
pub update: Option<update_history_property::Update>,
#[serde(rename = "immutabilityPeriodSinceCreationInDays", skip_serializing)]
pub immutability_period_since_creation_in_days: Option<i64>,
#[serde(skip_serializing)]
pub timestamp: Option<String>,
#[serde(rename = "objectIdentifier", skip_serializing)]
pub object_identifier: Option<String>,
#[serde(rename = "tenantId", skip_serializing)]
pub tenant_id: Option<String>,
#[serde(skip_serializing)]
pub upn: Option<String>,
}
pub mod update_history_property {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Update {
#[serde(rename = "put")]
Put,
#[serde(rename = "lock")]
Lock,
#[serde(rename = "extend")]
Extend,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LegalHoldProperties {
#[serde(rename = "hasLegalHold", skip_serializing)]
pub has_legal_hold: Option<bool>,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub tags: Vec<TagProperty>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagProperty {
#[serde(skip_serializing)]
pub tag: Option<String>,
#[serde(skip_serializing)]
pub timestamp: Option<String>,
#[serde(rename = "objectIdentifier", skip_serializing)]
pub object_identifier: Option<String>,
#[serde(rename = "tenantId", skip_serializing)]
pub tenant_id: Option<String>,
#[serde(skip_serializing)]
pub upn: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LegalHold {
#[serde(rename = "hasLegalHold", skip_serializing)]
pub has_legal_hold: Option<bool>,
pub tags: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListContainerItem {
#[serde(flatten)]
pub azure_entity_resource: AzureEntityResource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<ContainerProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListContainerItems {
#[serde(skip_serializing)]
pub value: Vec<ListContainerItem>,
#[serde(rename = "nextLink", skip_serializing)]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobServiceProperties {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<blob_service_properties::Properties>,
#[serde(skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
}
pub mod blob_service_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Properties {
#[serde(skip_serializing_if = "Option::is_none")]
pub cors: Option<CorsRules>,
#[serde(rename = "defaultServiceVersion", skip_serializing_if = "Option::is_none")]
pub default_service_version: Option<String>,
#[serde(rename = "deleteRetentionPolicy", skip_serializing_if = "Option::is_none")]
pub delete_retention_policy: Option<DeleteRetentionPolicy>,
#[serde(rename = "isVersioningEnabled", skip_serializing_if = "Option::is_none")]
pub is_versioning_enabled: Option<bool>,
#[serde(rename = "automaticSnapshotPolicyEnabled", skip_serializing_if = "Option::is_none")]
pub automatic_snapshot_policy_enabled: Option<bool>,
#[serde(rename = "changeFeed", skip_serializing_if = "Option::is_none")]
pub change_feed: Option<ChangeFeed>,
#[serde(rename = "restorePolicy", skip_serializing_if = "Option::is_none")]
pub restore_policy: Option<RestorePolicyProperties>,
#[serde(rename = "containerDeleteRetentionPolicy", skip_serializing_if = "Option::is_none")]
pub container_delete_retention_policy: Option<DeleteRetentionPolicy>,
#[serde(rename = "lastAccessTimeTrackingPolicy", skip_serializing_if = "Option::is_none")]
pub last_access_time_tracking_policy: Option<LastAccessTimeTrackingPolicy>,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobServiceItems {
#[serde(skip_serializing)]
pub value: Vec<BlobServiceProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ChangeFeed {
#[serde(skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(rename = "retentionInDays", skip_serializing_if = "Option::is_none")]
pub retention_in_days: Option<i32>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePolicyProperties {
pub enabled: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub days: Option<i64>,
#[serde(rename = "lastEnabledTime", skip_serializing)]
pub last_enabled_time: Option<String>,
#[serde(rename = "minRestoreTime", skip_serializing)]
pub min_restore_time: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LastAccessTimeTrackingPolicy {
pub enable: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<last_access_time_tracking_policy::Name>,
#[serde(rename = "trackingGranularityInDays", skip_serializing_if = "Option::is_none")]
pub tracking_granularity_in_days: Option<i32>,
#[serde(rename = "blobType", skip_serializing_if = "Vec::is_empty")]
pub blob_type: Vec<String>,
}
pub mod last_access_time_tracking_policy {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Name {
AccessTimeTracking,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LeaseContainerRequest {
pub action: lease_container_request::Action,
#[serde(rename = "leaseId", skip_serializing_if = "Option::is_none")]
pub lease_id: Option<String>,
#[serde(rename = "breakPeriod", skip_serializing_if = "Option::is_none")]
pub break_period: Option<i64>,
#[serde(rename = "leaseDuration", skip_serializing_if = "Option::is_none")]
pub lease_duration: Option<i64>,
#[serde(rename = "proposedLeaseId", skip_serializing_if = "Option::is_none")]
pub proposed_lease_id: Option<String>,
}
pub mod lease_container_request {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Action {
Acquire,
Renew,
Change,
Release,
Break,
}
}
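// A small illustrative sketch (an assumption, not part of the generated
// models): acquiring a container lease. Per the Storage REST convention a
// `leaseDuration` of -1 requests an infinite lease, while 15-60 requests a
// fixed-duration lease; optional fields drop out of the payload entirely.
#[cfg(test)]
mod lease_container_request_sketch {
    use super::*;
    #[test]
    fn acquire_infinite_lease_payload() {
        let req = LeaseContainerRequest {
            action: lease_container_request::Action::Acquire,
            lease_id: None,
            break_period: None,
            lease_duration: Some(-1), // -1 means an infinite lease
            proposed_lease_id: None,
        };
        assert_eq!(
            serde_json::to_string(&req).unwrap(),
            r#"{"action":"Acquire","leaseDuration":-1}"#
        );
    }
}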
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LeaseContainerResponse {
#[serde(rename = "leaseId", skip_serializing_if = "Option::is_none")]
pub lease_id: Option<String>,
#[serde(rename = "leaseTimeSeconds", skip_serializing_if = "Option::is_none")]
pub lease_time_seconds: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileServiceItems {
#[serde(skip_serializing)]
pub value: Vec<FileServiceProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileServiceProperties {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<file_service_properties::Properties>,
#[serde(skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
}
pub mod file_service_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Properties {
#[serde(skip_serializing_if = "Option::is_none")]
pub cors: Option<CorsRules>,
#[serde(rename = "shareDeleteRetentionPolicy", skip_serializing_if = "Option::is_none")]
pub share_delete_retention_policy: Option<DeleteRetentionPolicy>,
#[serde(rename = "protocolSettings", skip_serializing_if = "Option::is_none")]
pub protocol_settings: Option<ProtocolSettings>,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProtocolSettings {
#[serde(skip_serializing_if = "Option::is_none")]
pub smb: Option<SmbSetting>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SmbSetting {
#[serde(skip_serializing_if = "Option::is_none")]
pub multichannel: Option<Multichannel>,
#[serde(skip_serializing_if = "Option::is_none")]
pub versions: Option<String>,
#[serde(rename = "authenticationMethods", skip_serializing_if = "Option::is_none")]
pub authentication_methods: Option<String>,
#[serde(rename = "kerberosTicketEncryption", skip_serializing_if = "Option::is_none")]
pub kerberos_ticket_encryption: Option<String>,
#[serde(rename = "channelEncryption", skip_serializing_if = "Option::is_none")]
pub channel_encryption: Option<String>,
}
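// Illustrative values only (drawn from the service documentation, not from
// this file): the SMB settings above are semicolon-delimited strings, e.g.
// `versions: Some("SMB2.1;SMB3.0;SMB3.1.1".to_owned())` and
// `channel_encryption: Some("AES-128-GCM;AES-256-GCM".to_owned())`.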
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Multichannel {
#[serde(skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileShare {
#[serde(flatten)]
pub azure_entity_resource: AzureEntityResource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<FileShareProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeletedShare {
#[serde(rename = "deletedShareName")]
pub deleted_share_name: String,
#[serde(rename = "deletedShareVersion")]
pub deleted_share_version: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileShareItem {
#[serde(flatten)]
pub azure_entity_resource: AzureEntityResource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<FileShareProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileShareItems {
#[serde(skip_serializing)]
pub value: Vec<FileShareItem>,
#[serde(rename = "nextLink", skip_serializing)]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileShareProperties {
#[serde(rename = "lastModifiedTime", skip_serializing)]
pub last_modified_time: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub metadata: Option<serde_json::Value>,
#[serde(rename = "shareQuota", skip_serializing_if = "Option::is_none")]
pub share_quota: Option<i64>,
#[serde(rename = "enabledProtocols", skip_serializing_if = "Option::is_none")]
pub enabled_protocols: Option<file_share_properties::EnabledProtocols>,
#[serde(rename = "rootSquash", skip_serializing_if = "Option::is_none")]
pub root_squash: Option<file_share_properties::RootSquash>,
#[serde(skip_serializing)]
pub version: Option<String>,
#[serde(skip_serializing)]
pub deleted: Option<bool>,
#[serde(rename = "deletedTime", skip_serializing)]
pub deleted_time: Option<String>,
#[serde(rename = "remainingRetentionDays", skip_serializing)]
pub remaining_retention_days: Option<i64>,
#[serde(rename = "accessTier", skip_serializing_if = "Option::is_none")]
pub access_tier: Option<file_share_properties::AccessTier>,
#[serde(rename = "accessTierChangeTime", skip_serializing)]
pub access_tier_change_time: Option<String>,
#[serde(rename = "accessTierStatus", skip_serializing)]
pub access_tier_status: Option<String>,
#[serde(rename = "shareUsageBytes", skip_serializing)]
pub share_usage_bytes: Option<i64>,
#[serde(rename = "snapshotTime", skip_serializing)]
pub snapshot_time: Option<String>,
}
pub mod file_share_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum EnabledProtocols {
#[serde(rename = "SMB")]
Smb,
#[serde(rename = "NFS")]
Nfs,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum RootSquash {
NoRootSquash,
RootSquash,
AllSquash,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AccessTier {
TransactionOptimized,
Hot,
Cool,
Premium,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudError {
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<CloudErrorBody>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudErrorBody {
#[serde(skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub details: Vec<CloudErrorBody>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListQueueServices {
#[serde(skip_serializing)]
pub value: Vec<QueueServiceProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueueServiceProperties {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<queue_service_properties::Properties>,
}
pub mod queue_service_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Properties {
#[serde(skip_serializing_if = "Option::is_none")]
pub cors: Option<CorsRules>,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageQueue {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<QueueProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueueProperties {
#[serde(skip_serializing_if = "Option::is_none")]
pub metadata: Option<serde_json::Value>,
#[serde(rename = "approximateMessageCount", skip_serializing)]
pub approximate_message_count: Option<i64>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListQueue {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<ListQueueProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListQueueProperties {
#[serde(skip_serializing_if = "Option::is_none")]
pub metadata: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListQueueResource {
#[serde(skip_serializing)]
pub value: Vec<ListQueue>,
#[serde(rename = "nextLink", skip_serializing)]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListTableServices {
#[serde(skip_serializing)]
pub value: Vec<TableServiceProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TableServiceProperties {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<table_service_properties::Properties>,
}
pub mod table_service_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Properties {
#[serde(skip_serializing_if = "Option::is_none")]
pub cors: Option<CorsRules>,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Table {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<TableProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TableProperties {
#[serde(rename = "tableName", skip_serializing)]
pub table_name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListTableResource {
#[serde(skip_serializing)]
pub value: Vec<Table>,
#[serde(rename = "nextLink", skip_serializing)]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionListResult {
#[serde(skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateEndpointConnection>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnection {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateEndpointConnectionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionProperties {
#[serde(rename = "privateEndpoint", skip_serializing_if = "Option::is_none")]
pub private_endpoint: Option<PrivateEndpoint>,
#[serde(rename = "privateLinkServiceConnectionState")]
pub private_link_service_connection_state: PrivateLinkServiceConnectionState,
#[serde(rename = "provisioningState", skip_serializing)]
pub provisioning_state: Option<PrivateEndpointConnectionProvisioningState>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpoint {
#[serde(skip_serializing)]
pub id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServiceConnectionState {
#[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<PrivateEndpointServiceConnectionStatus>,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "actionRequired", skip_serializing_if = "Option::is_none")]
pub action_required: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointServiceConnectionStatus {
Pending,
Approved,
Rejected,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointConnectionProvisioningState {
Succeeded,
Creating,
Deleting,
Failed,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
#[serde(skip_serializing)]
pub id: Option<String>,
#[serde(skip_serializing)]
pub name: Option<String>,
#[serde(rename = "type", skip_serializing)]
pub type_: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceListResult {
#[serde(skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkResource>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResource {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateLinkResourceProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceProperties {
#[serde(rename = "groupId", skip_serializing)]
pub group_id: Option<String>,
#[serde(rename = "requiredMembers", skip_serializing)]
pub required_members: Vec<String>,
#[serde(rename = "requiredZoneNames", skip_serializing_if = "Vec::is_empty")]
pub required_zone_names: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SkuName {
#[serde(rename = "Standard_LRS")]
StandardLrs,
#[serde(rename = "Standard_GRS")]
StandardGrs,
#[serde(rename = "Standard_RAGRS")]
StandardRagrs,
#[serde(rename = "Standard_ZRS")]
StandardZrs,
#[serde(rename = "Premium_LRS")]
PremiumLrs,
#[serde(rename = "Premium_ZRS")]
PremiumZrs,
#[serde(rename = "Standard_GZRS")]
StandardGzrs,
#[serde(rename = "Standard_RAGZRS")]
StandardRagzrs,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Tier {
Standard,
Premium,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sku {
pub name: SkuName,
#[serde(skip_serializing)]
pub tier: Option<Tier>,
}
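// A minimal sketch (not part of the generated surface) illustrating how the
// serde attributes above shape the wire format: renamed enum variants use the
// REST API names and `skip_serializing` fields never appear in request bodies.
// Assumes `serde_json` is available, as the `serde_json::Value` fields above
// already require.
#[cfg(test)]
mod sku_serde_sketch {
    use super::*;
    #[test]
    fn sku_serializes_with_rest_names_only() {
        let sku = Sku {
            name: SkuName::StandardLrs,
            tier: Some(Tier::Standard),
        };
        // `tier` is marked `skip_serializing`, so only `name` is emitted.
        assert_eq!(serde_json::to_string(&sku).unwrap(), r#"{"name":"Standard_LRS"}"#);
    }
}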
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProxyResource {
#[serde(flatten)]
pub resource: Resource,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TrackedResource {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
pub location: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemData {
#[serde(rename = "createdBy", skip_serializing_if = "Option::is_none")]
pub created_by: Option<String>,
#[serde(rename = "createdByType", skip_serializing_if = "Option::is_none")]
pub created_by_type: Option<system_data::CreatedByType>,
#[serde(rename = "createdAt", skip_serializing_if = "Option::is_none")]
pub created_at: Option<String>,
#[serde(rename = "lastModifiedBy", skip_serializing_if = "Option::is_none")]
pub last_modified_by: Option<String>,
#[serde(rename = "lastModifiedByType", skip_serializing_if = "Option::is_none")]
pub last_modified_by_type: Option<system_data::LastModifiedByType>,
#[serde(rename = "lastModifiedAt", skip_serializing_if = "Option::is_none")]
pub last_modified_at: Option<String>,
}
pub mod system_data {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CreatedByType {
User,
Application,
ManagedIdentity,
Key,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LastModifiedByType {
User,
Application,
ManagedIdentity,
Key,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureEntityResource {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing)]
pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CorsRules {
#[serde(rename = "corsRules", skip_serializing_if = "Vec::is_empty")]
pub cors_rules: Vec<CorsRule>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CorsRule {
#[serde(rename = "allowedOrigins")]
pub allowed_origins: Vec<String>,
#[serde(rename = "allowedMethods")]
pub allowed_methods: Vec<String>,
#[serde(rename = "maxAgeInSeconds")]
pub max_age_in_seconds: i64,
#[serde(rename = "exposedHeaders")]
pub exposed_headers: Vec<String>,
#[serde(rename = "allowedHeaders")]
pub allowed_headers: Vec<String>,
}
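// Illustrative only (not part of the generated models): constructing a CORS
// rule set. The `rename` attribute above means the JSON key is `corsRules`,
// and the HTTP verbs are carried as plain strings in this model.
#[cfg(test)]
mod cors_rules_sketch {
    use super::*;
    #[test]
    fn cors_rules_serialize_under_the_renamed_key() {
        let rules = CorsRules {
            cors_rules: vec![CorsRule {
                allowed_origins: vec!["https://example.com".to_owned()],
                allowed_methods: vec!["GET".to_owned(), "PUT".to_owned()],
                max_age_in_seconds: 3600,
                exposed_headers: vec![],
                allowed_headers: vec![],
            }],
        };
        let json = serde_json::to_string(&rules).unwrap();
        assert!(json.starts_with(r#"{"corsRules":["#));
    }
}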
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeleteRetentionPolicy {
#[serde(skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub days: Option<i64>,
}
| 38.999505 | 108 | 0.703289 |
f5ae8ab5c5487b71b7a99e8a157d3440ff504ef7 | 10,806 | use std::os::raw::{c_char, c_int};
pub type BOOL = c_int;
/* ------------------------------------------------------------ */
/* Miscellaneous Declarations */
/* ------------------------------------------------------------ */
pub const MAX_PATH: usize = 260; // this is the current windows definition
/* ------------------------------------------------------------ */
/* General Type Declarations */
/* ------------------------------------------------------------ */
/* These symbols define the maximum allowed length for various
** strings used by the interfaces.
*/
pub const cchAliasMax: usize = 16; //max length of device table alias string
pub const cchUsrNameMax: usize = 16; //max length of user settable device name
pub const cchProdNameMax: usize = 28; //max length of product name string
pub const cchSnMax: usize = 15; //length of a serial number string
pub const cchVersionMax: usize = 256; //max length returned for DLL version string
pub const cchDvcNameMax: usize = 64; //size of name field in DVC structure
pub const cchDtpStringMax: usize = 16; //maximum length of DTP name string
pub const cchErcMax: usize = 48; //maximum length of error code symbolic name
pub const cchErcMsgMax: usize = 128; //maximum length of error message descriptive string
/* The device management capabilities value indicates which device
** management function sets are supported by the device. Device
** management function sets apply to a device as a whole. For example,
** the mgtcapPower capability indicates that the device supports the
** power on/off capability.
*/
pub type MGTCAP = u32; // management capabilities
/* The device interface capabilities value indicates which interface types
** are supported by the device or being requested by the application.
*/
pub type DCAP = u32; //capabilities bitfield
pub const dcapJtag: DCAP = 0x00000001; //this symbol is deprecated
pub const dcapJtg: DCAP = 0x00000001;
pub const dcapPio: DCAP = 0x00000002;
pub const dcapEpp: DCAP = 0x00000004;
pub const dcapStm: DCAP = 0x00000008;
pub const dcapSpi: DCAP = 0x00000010;
pub const dcapTwi: DCAP = 0x00000020;
pub const dcapAci: DCAP = 0x00000040;
pub const dcapAio: DCAP = 0x00000080;
pub const dcapEmc: DCAP = 0x00000100;
pub const dcapDci: DCAP = 0x00000200;
pub const dcapGio: DCAP = 0x00000400;
pub const dcapPti: DCAP = 0x00000800;
pub const dcapAll: DCAP = 0xFFFFFFFF;
/* The port properties values are used by each protocol type to
** indicate details about the features supported by each individual
** port. The type is declared here. The properties values are
** defined in the protocol specific header files.
*/
pub type DPRP = u32;
/* Device type indicates which physical transport and protocol are used to
** access the device. The lower 16 bits are interpreted as a bitfield that
** is used to specify the type of transport used by the device. The upper
** 16 bits are interpreted as the protocol used to communicate with a
** device of the specified transport type. Please note that specification
** of the protocol is optional and if no protocol is specified then
** communication with all devices of a particular transport type will be
** attempted.
*/
pub type DTP = u32;
pub const dtpUSB: DTP = 0x00000001;
pub const dtpEthernet: DTP = 0x00000002;
pub const dtpParallel: DTP = 0x00000004;
pub const dtpSerial: DTP = 0x00000008;
pub const dtpNone: DTP = 0x00000000;
pub const dtpAll: DTP = 0xFFFFFFFF;
pub const dtpNil: DTP = 0;
pub type TPT = u16;
pub const tptUSB: TPT = 0x0001;
pub const tptEthernet: TPT = 0x0002;
pub const tptParallel: TPT = 0x0004;
pub const tptSerial: TPT = 0x0008;
pub const tptNone: TPT = 0x0000;
pub const tptAll: TPT = 0xFFFF;
pub const tptNil: TPT = 0x0000;
pub type PTC = u16;
pub const ptcProtocol1: PTC = 0x0001;
pub const ptcProtocol2: PTC = 0x0002;
pub const ptcProtocol3: PTC = 0x0004;
pub const ptcProtocol4: PTC = 0x0008;
pub const ptcProtocol5: PTC = 0x0010;
pub const ptcProtocol6: PTC = 0x0020;
pub const ptcProtocol7: PTC = 0x0040;
pub const ptcProtocol8: PTC = 0x0080;
pub const ptcProtocol9: PTC = 0x0100;
pub const ptcProtocol10: PTC = 0x0200;
pub const ptcProtocol11: PTC = 0x0400;
pub const ptcProtocol12: PTC = 0x0800;
pub const ptcProtocol13: PTC = 0x1000;
pub const ptcProtocol14: PTC = 0x2000;
pub const ptcProtocol15: PTC = 0x4000;
pub const ptcProtocol16: PTC = 0x8000;
pub const ptcAll: PTC = 0x0000;
pub const ptcNil: PTC = 0x0000;
#[inline]
pub unsafe fn TptFromDtp(dtp: DTP) -> TPT {
(dtp & 0xFFFF) as TPT
}
#[inline]
pub unsafe fn PtcFromDtp(dtp: DTP) -> PTC {
((dtp >> 16) & 0xFFFF) as PTC
}
#[inline]
pub unsafe fn DtpFromTptPtc(tpt: TPT, ptc: PTC) -> DTP {
tpt as DTP | (ptc as DTP) << 16
}
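// A small sketch (not part of the original header translation) showing the
// round trip between the packed DTP value and its TPT/PTC halves.
#[cfg(test)]
mod dtp_packing_sketch {
    use super::*;
    #[test]
    fn dtp_round_trips_through_tpt_and_ptc() {
        unsafe {
            // Pack: USB transport in the low 16 bits, protocol 1 in the high 16.
            let dtp = DtpFromTptPtc(tptUSB, ptcProtocol1);
            assert_eq!(dtp, 0x0001_0001);
            // Unpack both halves again.
            assert_eq!(TptFromDtp(dtp), tptUSB);
            assert_eq!(PtcFromDtp(dtp), ptcProtocol1);
        }
    }
}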
/* Device interface handle.
*/
pub type HIF = u32;
pub const hifInvalid: HIF = 0;
/* These values are used to report various attributes of a device.
*/
pub type PDID = u32; // device product id
pub type FWTYPE = u16;
pub type FWVER = u16; // device firmware version number
pub type FWID = u8; // device firmware identifier
#[inline]
pub unsafe fn ProductFromPdid(pdid: PDID) -> c_int {
((pdid >> 20) & 0xFFF) as c_int
}
#[inline]
pub unsafe fn VariantFromPdid(pdid: PDID) -> c_int {
((pdid >> 8) & 0xFFF) as c_int
}
#[inline]
pub unsafe fn FwidFromPdid(pdid: PDID) -> FWID {
(pdid & 0xFF) as FWID
}
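// A hedged sketch of decoding a product id: bits 20-31 hold the product,
// bits 8-19 the variant, and bits 0-7 the firmware identifier. The PDID value
// used here is made up for illustration.
#[cfg(test)]
mod pdid_decoding_sketch {
    use super::*;
    #[test]
    fn pdid_fields_decode_from_their_bit_ranges() {
        let pdid: PDID = 0x1234_5678;
        unsafe {
            assert_eq!(ProductFromPdid(pdid), 0x123);
            assert_eq!(VariantFromPdid(pdid), 0x456);
            assert_eq!(FwidFromPdid(pdid), 0x78);
        }
    }
}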
/* These values are used to retrieve or set various information about
** a device.
*/
pub type DINFO = u32;
// public
pub const dinfoNone: DINFO = 0;
pub const dinfoAlias: DINFO = 1;
pub const dinfoUsrName: DINFO = 2;
pub const dinfoProdName: DINFO = 3;
pub const dinfoPDID: DINFO = 4;
pub const dinfoSN: DINFO = 5;
pub const dinfoIP: DINFO = 6;
pub const dinfoMAC: DINFO = 7; //Ethernet MAC and SN are the same
pub const dinfoDCAP: DINFO = 9;
pub const dinfoSerParam: DINFO = 10;
pub const dinfoParAddr: DINFO = 11;
pub const dinfoUsbPath: DINFO = 12;
pub const dinfoProdID: DINFO = 13; // the ProductID from PDID
pub const dinfoOpenCount: DINFO = 14; // how many times a device is opened
pub const dinfoFWVER: DINFO = 15;
/* Error codes
*/
pub type ERC = c_int;
pub const ercNoErc: ERC = 0; // No error occurred
// The following error codes can be directly mapped to the device error codes.
pub const ercNotSupported: ERC = 1; // Capability or function not supported by the device
pub const ercTransferCancelled: ERC = 2; // The transfer was cancelled or a timeout occurred
pub const ercCapabilityConflict: ERC = 3; // Tried to enable capabilities that use shared resources, check device datasheet
pub const ercCapabilityNotEnabled: ERC = 4; // The protocol is not enabled
pub const ercEppAddressTimeout: ERC = 5; // EPP Address strobe timeout
pub const ercEppDataTimeout: ERC = 6; // EPP Data strobe timeout
pub const ercDataSndLess: ERC = 7; // Data send failed or the peripheral did not receive all the sent data
pub const ercDataRcvLess: ERC = 8; // Data receive failed or peripheral sent less data
pub const ercDataRcvMore: ERC = 9; // Peripheral sent more data
pub const ercDataSndLessRcvLess: ERC = 10; // Two errors: ercDataSndLess and ercDataRcvLess
pub const ercDataSndLessRcvMore: ERC = 11; // Two errors: ercDataSndLess and ercDataRcvMore
pub const ercInvalidPort: ERC = 12; // Attempt to enable port when another port is already enabled
pub const ercBadParameter: ERC = 13; // Command parameter out of range
// ACI error codes, directly mapped to device error codes.
pub const ercAciFifoFull: ERC = 0x20; // Transmit FIFO overflow
// TWI error codes, directly mapped to device error codes.
pub const ercTwiBadBatchCmd: ERC = 0x20; // Bad command in TWI batch buffer
pub const ercTwiBusBusy: ERC = 0x21; // Timed out waiting for TWI bus
pub const ercTwiAdrNak: ERC = 0x22; // TWI address not ack'd
pub const ercTwiDataNak: ERC = 0x23; // TWI data not ack'd
pub const ercTwiSmbPecError: ERC = 0x24; // Packet error when using packet error checking
// Most likely the user did something wrong.
pub const ercAlreadyOpened: ERC = 1024; // Device already opened
pub const ercInvalidHif: ERC = 1025; // Invalid interface handle provided, first call DmgrOpen(Ex)
pub const ercInvalidParameter: ERC = 1026; // Invalid parameter sent in API call
pub const ercTransferPending: ERC = 1031; // The last API called in overlapped mode was not finished. Use DmgrGetTransStat or DmgrCancelTrans
pub const ercApiLockTimeout: ERC = 1032; // API waiting on pending API timed out
pub const ercPortConflict: ERC = 1033; // Attempt to enable port when another port is already enabled
// Not the user's fault.
pub const ercConnectionFailed: ERC = 3072; // Unknown fail of connection
pub const ercControlTransferFailed: ERC = 3075; // Control transfer failed
pub const ercCmdSendFailed: ERC = 3076; // Command sending failed
pub const ercStsReceiveFailed: ERC = 3077; // Status receiving failed
pub const ercInsufficientResources: ERC = 3078; // Memory allocation failed, insufficient system resources
pub const ercInvalidTFP: ERC = 3079; // Internal protocol error, DVT rejected the transfer structure sent by public API
pub const ercInternalError: ERC = 3080; // Internal error
pub const ercTooManyOpenedDevices: ERC = 3081; // Internal error
pub const ercConfigFileError: ERC = 3082; // Processing of configuration file failed
pub const ercDeviceNotConnected: ERC = 3083; // Device not connected
pub const ercEnumNotFree: ERC = 3084; // Device Enumeration failed because another enumeration is still running.
pub const ercEnumFreeFail: ERC = 3085; // Device Enumeration list could not be freed
pub const ercInvalidDevice: ERC = 3086; // OEM ID check failed
pub const ercDeviceBusy: ERC = 3087; // The device is currently claimed by another process.
pub const ercCorruptInstallation: ERC = 3088; // One or more critical files are missing from the system.
pub const ercDabsInitFailed: ERC = 3089; // Initialization of the DABS library failed
pub const ercDpcommInitFailed: ERC = 3090; // Initialization of the DPCOMM library failed
//ENUM errors
//DVTBL errors
/* ------------------------------------------------------------ */
/* Data Structure Declarations */
/* ------------------------------------------------------------ */
#[repr(C)]
pub struct DVC {
pub szName: [c_char; cchDvcNameMax],
//in dvctable: Alias
//not in dvctable: user assigned name in device
//not in dvctable, no user defined name: device type with identifier
pub szConn: [c_char; MAX_PATH + 1],
//in dvctable: connection string in dvctable
//not in dvctable: USB: PATHNAME
// Eth: IP:192.168.1.1
// Ser: COM1:9600,N,8,1
// EPP: EPP:0x378
pub dtp: DTP,
}
| 40.171004 | 142 | 0.704886 |
f77c7777a29a19305e0e38f18cf6c4297e0eeddf | 1,834 | use db::Database;
use hyper::{body, Body, HeaderMap, Response, StatusCode};
use model::quiz::Submission;
/// Attempts to create a new quiz. Returns the ObjectID of the document.
async fn try_submit_quiz(db: &Database, sub: &Submission) -> Result<[u8; 12], StatusCode> {
let choice_count = sub.quiz.choices.len();
if usize::from(sub.quiz.answer) >= choice_count || !(1..=25).contains(&choice_count) {
return Err(StatusCode::BAD_REQUEST);
}
match db.create_quiz(sub).await {
Ok(oid) => Ok(oid.bytes()),
Err(db::error::Error::AlreadyExists) => Err(StatusCode::FORBIDDEN),
_ => Err(StatusCode::INTERNAL_SERVER_ERROR),
}
}
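// A minimal usage sketch (hypothetical `db` handle and `submission` value;
// names mirror the surrounding code, not a confirmed public API):
//
// let oid: [u8; 12] = try_submit_quiz(&db, &submission).await?;
// // `oid` holds the raw 12-byte ObjectId of the newly created quiz.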
pub async fn try_respond(body: Body, headers: &HeaderMap, db: &Database) -> Result<Response<Body>, StatusCode> {
// Retrieve the session from the cookie
let session = super::util::session::extract_session(headers)?;
let oid = db::ObjectId::parse_str(session).map_err(|_| StatusCode::BAD_REQUEST)?;
// Check database if user ID is present
let user = db
.get_session(oid)
.await
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?
.ok_or(StatusCode::UNAUTHORIZED)?
.as_user()
.ok_or(StatusCode::FORBIDDEN)?;
// Finally parse the JSON form submission
use body::Buf;
use model::quiz::Quiz;
let reader = body::aggregate(body).await.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?.reader();
let quiz: Quiz = serde_json::from_reader(reader).map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
// Submit the quiz to the database
use alloc::vec::Vec;
let submission = Submission { id: user, quiz };
let oid: Vec<_> = try_submit_quiz(db, &submission).await?.into();
let mut res = Response::new(oid.into());
*res.status_mut() = StatusCode::CREATED;
Ok(res)
}
| 39.021277 | 112 | 0.663032 |
760371ce533a41d0d3266ebcea6edd232c2d40d4 | 4,189 | //! # Object Records
//!
//! Each object Environment Record is associated with an object called its binding object.
//! An object Environment Record binds the set of string identifier names that directly
//! correspond to the property names of its binding object.
//! Property keys that are not strings in the form of an `IdentifierName` are not included in the set of bound identifiers.
//! More info: [Object Records](https://tc39.es/ecma262/#sec-object-environment-records)
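//!
//! A minimal usage sketch (doc-test style, `ignore`d because constructing a
//! suitable binding object requires interpreter internals not shown here):
//!
//! ```ignore
//! let mut record = ObjectEnvironmentRecord {
//!     bindings: binding_object, // some pre-built object `Value`
//!     with_environment: false,
//!     outer_env: None,
//! };
//! record.create_mutable_binding("x".to_string(), true);
//! record.initialize_binding("x", value); // `value` must be an object or function
//! assert!(record.has_binding("x"));
//! ```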
use crate::{
environment::{
environment_record_trait::EnvironmentRecordTrait,
lexical_environment::{Environment, EnvironmentType},
},
property::{Attribute, DataDescriptor},
Value,
};
use gc::{Finalize, Trace};
#[derive(Debug, Trace, Finalize, Clone)]
pub struct ObjectEnvironmentRecord {
pub bindings: Value,
pub with_environment: bool,
pub outer_env: Option<Environment>,
}
impl EnvironmentRecordTrait for ObjectEnvironmentRecord {
fn has_binding(&self, name: &str) -> bool {
if self.bindings.has_field(name) {
if self.with_environment {
// TODO: implement unscopables
}
true
} else {
false
}
}
fn create_mutable_binding(&mut self, name: String, deletion: bool) {
// TODO: could save time here and not bother generating a new undefined object,
        // only for it to be replaced with the real value later. We could just add the name to a Vec instead.
let bindings = &mut self.bindings;
let mut prop = DataDescriptor::new(
Value::undefined(),
Attribute::WRITABLE | Attribute::ENUMERABLE,
);
prop.set_configurable(deletion);
bindings.set_property(name, prop);
}
fn create_immutable_binding(&mut self, _name: String, _strict: bool) -> bool {
true
}
fn initialize_binding(&mut self, name: &str, value: Value) {
        // We should never need to check whether a binding has been created,
        // as every call to create_mutable_binding is followed by a call to
        // initialize_binding. The assertion below is just a sanity check.
        debug_assert!(self.has_binding(name));
self.set_mutable_binding(name, value, false)
}
fn set_mutable_binding(&mut self, name: &str, value: Value, strict: bool) {
debug_assert!(value.is_object() || value.is_function());
let mut property = DataDescriptor::new(value, Attribute::ENUMERABLE);
property.set_configurable(strict);
self.bindings
.as_object()
.expect("binding object")
.insert(name, property);
}
fn get_binding_value(&self, name: &str, strict: bool) -> Value {
if self.bindings.has_field(name) {
self.bindings.get_field(name)
} else {
if strict {
// TODO: throw error here
// Error handling not implemented yet
}
Value::undefined()
}
}
fn delete_binding(&mut self, name: &str) -> bool {
self.bindings.remove_property(name);
true
}
fn has_this_binding(&self) -> bool {
false
}
fn get_this_binding(&self) -> Value {
Value::undefined()
}
fn has_super_binding(&self) -> bool {
false
}
fn with_base_object(&self) -> Value {
// Object Environment Records return undefined as their
// WithBaseObject unless their withEnvironment flag is true.
if self.with_environment {
return self.bindings.clone();
}
Value::undefined()
}
fn get_outer_environment(&self) -> Option<Environment> {
        self.outer_env.clone()
}
fn set_outer_environment(&mut self, env: Environment) {
self.outer_env = Some(env);
}
fn get_environment_type(&self) -> EnvironmentType {
EnvironmentType::Function
}
fn get_global_object(&self) -> Option<Value> {
if let Some(outer) = &self.outer_env {
outer.borrow().get_global_object()
} else {
None
}
}
}
| 30.801471 | 123 | 0.613034 |
e54cf93f6be3b4975acefad21ab63ceae8147746 | 2,450 | //! A module containing all possible replies the bot could send.
use super::SPOILER_TITLE_SEPARATOR;
/// Informs the user to send the content to be spoiled.
pub(crate) static PREPARING_A_SPOILER: &'static str =
"Preparing a spoiler. To cancel, type /cancel.
First send the content to be spoiled. It can be text, photo, or any other media.";
/// Informs the user that the spoiler creation process has been cancelled.
pub(crate) static SPOILER_CREATION_CANCELLED: &'static str =
"The spoiler creation has been cancelled.";
/// Informs the user that the spoiler is now ready.
pub(crate) static SPOILER_READY: &'static str = "Done! Your advanced spoiler is ready.";
/// Informs the user that the spoiler could not be found.
pub(crate) static SPOILER_NOT_FOUND: &'static str =
"Spoiler not found! It might have expired already...";
/// Informs the user how to start the bot.
pub(crate) static _TYPE_START: &'static str =
"Type /start to prepare an advanced spoiler with a custom title.";
/// Informs the user to now send a title for the spoiler.
pub(crate) static NOW_SEND_A_TITLE: &'static str =
"Now send a title for the spoiler (maximum 256 characters).
It will be immediately visible and can be used to add a small description for your spoiler.
Type a dash (-) now if you do not want a title for your spoiler.";
/// Informs the user to tap again to show the spoiler.
pub(crate) static TAP_AGAIN_TO_SHOW_SPOILER: &'static str = "Please tap again to see the spoiler";
/// Informs the user that the bot is not an admin in that chat.
pub(crate) static NOT_AN_ADMIN: &'static str =
"I need to be a group admin with permission to delete messages in order to do it!";
/// Informs the user that the bot is an admin in that chat but has no permission to delete messages.
pub(crate) static NO_DELETE_PERMISSION: &'static str =
"I am an admin but I don't have the permission to delete messages from others!";
/// Sends information how to use this bot.
pub(crate) fn help_text(bot_username: String) -> String {
format!(
"Type /start to prepare an advanced spoiler with a custom title.
You can type quick spoilers by using @{} in inline mode:
@{} your spoiler message…
Custom titles can also be used from inline mode as follows:
@{} title for the spoiler{}contents of the spoiler
Note that the title will be immediately visible!",
bot_username, bot_username, bot_username, SPOILER_TITLE_SEPARATOR
)
}
| 43.75 | 100 | 0.738367 |
cc5861c415e8af27b84d0c7e706c7558a8fdcba7 | 697 | #[derive(Debug, Default)]
pub struct RawFileLoaderSource;
impl amethyst::assets::Source for RawFileLoaderSource {
fn modified(&self, _path: &str) -> Result<u64, amethyst::Error> {
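        // Reporting a constant timestamp means assets loaded through this
        // source are never considered modified, so hot-reloading is
        // effectively a no-op for it.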
Ok(0)
}
fn load(&self, path: &str) -> Result<Vec<u8>, amethyst::Error> {
use crate::initialize_paths;
use std::{fs::File, io::Read};
let (_, _, asset_dir) = initialize_paths().expect("Could not initialize paths");
let path = asset_dir.join(path);
let content = {
let mut file = File::open(path)?;
let mut buffer = Vec::new();
file.read_to_end(&mut buffer)?;
buffer
};
Ok(content)
}
}
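// A minimal registration sketch (builder details vary across amethyst
// versions; treat this as illustrative only):
//
// let mut loader = world.write_resource::<amethyst::assets::Loader>();
// loader.add_source("raw", RawFileLoaderSource::default());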
| 26.807692 | 88 | 0.56528 |
169caf1f672667e7c064f3ce8e17d5b6b57fdc0d | 15,817 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Method lookup: the secret sauce of Rust. See the [rustc guide] chapter.
//!
//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/method-lookup.html
use check::FnCtxt;
use hir::def::Def;
use hir::def_id::DefId;
use namespace::Namespace;
use rustc::ty::subst::Substs;
use rustc::traits;
use rustc::ty::{self, Ty, ToPredicate, ToPolyTraitRef, TraitRef, TypeFoldable};
use rustc::ty::GenericParamDefKind;
use rustc::ty::subst::Subst;
use rustc::infer::{self, InferOk};
use syntax::ast;
use syntax_pos::Span;
use rustc::hir;
use rustc_data_structures::sync::Lrc;
pub use self::MethodError::*;
pub use self::CandidateSource::*;
pub use self::suggest::TraitInfo;
mod confirm;
pub mod probe;
mod suggest;
use self::probe::{IsSuggestion, ProbeScope};
pub fn provide(providers: &mut ty::maps::Providers) {
suggest::provide(providers);
}
#[derive(Clone, Copy, Debug)]
pub struct MethodCallee<'tcx> {
    /// Impl method ID for inherent methods, or trait method ID otherwise.
pub def_id: DefId,
pub substs: &'tcx Substs<'tcx>,
/// Instantiated method signature, i.e. it has been
/// substituted, normalized, and has had late-bound
/// lifetimes replaced with inference variables.
pub sig: ty::FnSig<'tcx>,
}
pub enum MethodError<'tcx> {
// Did not find an applicable method, but we did find various near-misses that may work.
NoMatch(NoMatchData<'tcx>),
// Multiple methods might apply.
Ambiguity(Vec<CandidateSource>),
// Found an applicable method, but it is not visible. The second argument contains a list of
// not-in-scope traits which may work.
PrivateMatch(Def, Vec<DefId>),
// Found a `Self: Sized` bound where `Self` is a trait object, also the caller may have
// forgotten to import a trait.
IllegalSizedBound(Vec<DefId>),
// Found a match, but the return type is wrong
BadReturnType,
}
// Contains a list of static methods that may apply, a list of unsatisfied trait predicates which
// could lead to matches if satisfied, and a list of not-in-scope traits which may work.
pub struct NoMatchData<'tcx> {
pub static_candidates: Vec<CandidateSource>,
pub unsatisfied_predicates: Vec<TraitRef<'tcx>>,
pub out_of_scope_traits: Vec<DefId>,
pub lev_candidate: Option<ty::AssociatedItem>,
pub mode: probe::Mode,
}
impl<'tcx> NoMatchData<'tcx> {
pub fn new(static_candidates: Vec<CandidateSource>,
unsatisfied_predicates: Vec<TraitRef<'tcx>>,
out_of_scope_traits: Vec<DefId>,
lev_candidate: Option<ty::AssociatedItem>,
mode: probe::Mode)
-> Self {
NoMatchData {
static_candidates,
unsatisfied_predicates,
out_of_scope_traits,
lev_candidate,
mode,
}
}
}
// A pared down enum describing just the places from which a method
// candidate can arise. Used for error reporting only.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub enum CandidateSource {
ImplSource(DefId),
TraitSource(// trait id
DefId),
}
impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
/// Determines whether the type `self_ty` supports a method name `method_name` or not.
pub fn method_exists(&self,
method_name: ast::Ident,
self_ty: Ty<'tcx>,
call_expr_id: ast::NodeId,
allow_private: bool)
-> bool {
let mode = probe::Mode::MethodCall;
match self.probe_for_name(method_name.span, mode, method_name.name,
IsSuggestion(false), self_ty, call_expr_id,
ProbeScope::TraitsInScope) {
Ok(..) => true,
Err(NoMatch(..)) => false,
Err(Ambiguity(..)) => true,
Err(PrivateMatch(..)) => allow_private,
Err(IllegalSizedBound(..)) => true,
Err(BadReturnType) => {
bug!("no return type expectations but got BadReturnType")
}
}
}
/// Performs method lookup. If lookup is successful, it will return the callee
/// and store an appropriate adjustment for the self-expr. In some cases it may
/// report an error (e.g., invoking the `drop` method).
///
/// # Arguments
///
/// Given a method call like `foo.bar::<T1,...Tn>(...)`:
///
    /// * `self`: the surrounding `FnCtxt`
    /// * `self_ty`: the (unadjusted) type of the self expression (`foo`)
    /// * `segment`: the path segment for the method, carrying its name (`bar`)
    ///   and any explicit type parameters (`T1..Tn`)
    /// * `span`: the span for the method call
    /// * `call_expr`: the complete method call expression
    /// * `self_expr`: the self expression (`foo`)
pub fn lookup_method(&self,
self_ty: Ty<'tcx>,
segment: &hir::PathSegment,
span: Span,
call_expr: &'gcx hir::Expr,
self_expr: &'gcx hir::Expr)
-> Result<MethodCallee<'tcx>, MethodError<'tcx>> {
debug!("lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})",
segment.name,
self_ty,
call_expr,
self_expr);
let pick = self.lookup_probe(
span,
segment.name,
self_ty,
call_expr,
ProbeScope::TraitsInScope
)?;
if let Some(import_id) = pick.import_id {
let import_def_id = self.tcx.hir.local_def_id(import_id);
debug!("used_trait_import: {:?}", import_def_id);
Lrc::get_mut(&mut self.tables.borrow_mut().used_trait_imports)
.unwrap().insert(import_def_id);
}
self.tcx.check_stability(pick.item.def_id, Some(call_expr.id), span);
let result = self.confirm_method(
span,
self_expr,
call_expr,
self_ty,
pick.clone(),
segment,
);
if result.illegal_sized_bound {
// We probe again, taking all traits into account (not only those in scope).
let candidates =
match self.lookup_probe(span,
segment.name,
self_ty,
call_expr,
ProbeScope::AllTraits) {
// If we find a different result the caller probably forgot to import a trait.
Ok(ref new_pick) if *new_pick != pick => vec![new_pick.item.container.id()],
Err(Ambiguity(ref sources)) => {
sources.iter()
.filter_map(|source| {
match *source {
// Note: this cannot come from an inherent impl,
// because the first probing succeeded.
ImplSource(def) => self.tcx.trait_id_of_impl(def),
TraitSource(_) => None,
}
})
.collect()
}
_ => Vec::new(),
};
return Err(IllegalSizedBound(candidates));
}
Ok(result.callee)
}
fn lookup_probe(&self,
span: Span,
method_name: ast::Name,
self_ty: Ty<'tcx>,
call_expr: &'gcx hir::Expr,
scope: ProbeScope)
-> probe::PickResult<'tcx> {
let mode = probe::Mode::MethodCall;
let self_ty = self.resolve_type_vars_if_possible(&self_ty);
self.probe_for_name(span, mode, method_name, IsSuggestion(false),
self_ty, call_expr.id, scope)
}
/// `lookup_method_in_trait` is used for overloaded operators.
/// It does a very narrow slice of what the normal probe/confirm path does.
/// In particular, it doesn't really do any probing: it simply constructs
/// an obligation for a particular trait with the given self-type and checks
/// whether that trait is implemented.
///
/// FIXME(#18741) -- It seems likely that we can consolidate some of this
/// code with the other method-lookup code. In particular, the second half
/// of this method is basically the same as confirmation.
pub fn lookup_method_in_trait(&self,
span: Span,
m_name: ast::Name,
trait_def_id: DefId,
self_ty: Ty<'tcx>,
opt_input_types: Option<&[Ty<'tcx>]>)
-> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
debug!("lookup_in_trait_adjusted(self_ty={:?}, \
m_name={}, trait_def_id={:?})",
self_ty,
m_name,
trait_def_id);
// Construct a trait-reference `self_ty : Trait<input_tys>`
let substs = Substs::for_item(self.tcx, trait_def_id, |param, _| {
match param.kind {
GenericParamDefKind::Lifetime => {}
GenericParamDefKind::Type {..} => {
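                    // Type parameter 0 is the `Self` type; any further type
                    // parameters are taken from `opt_input_types`, which is
                    // indexed without the `Self` slot (hence `index - 1`).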
if param.index == 0 {
return self_ty.into();
} else if let Some(ref input_types) = opt_input_types {
return input_types[param.index as usize - 1].into();
}
}
}
self.var_for_def(span, param)
});
let trait_ref = ty::TraitRef::new(trait_def_id, substs);
// Construct an obligation
let poly_trait_ref = trait_ref.to_poly_trait_ref();
let obligation =
traits::Obligation::misc(span,
self.body_id,
self.param_env,
poly_trait_ref.to_predicate());
// Now we want to know if this can be matched
if !self.predicate_may_hold(&obligation) {
debug!("--> Cannot match obligation");
return None; // Cannot be matched, no such method resolution is possible.
}
// Trait must have a method named `m_name` and it should not have
// type parameters or early-bound regions.
let tcx = self.tcx;
let method_item = self.associated_item(trait_def_id, m_name, Namespace::Value).unwrap();
let def_id = method_item.def_id;
let generics = tcx.generics_of(def_id);
assert_eq!(generics.params.len(), 0);
debug!("lookup_in_trait_adjusted: method_item={:?}", method_item);
let mut obligations = vec![];
// Instantiate late-bound regions and substitute the trait
// parameters into the method type to get the actual method type.
//
// NB: Instantiate late-bound regions first so that
// `instantiate_type_scheme` can normalize associated types that
// may reference those regions.
let fn_sig = tcx.fn_sig(def_id);
let fn_sig = self.replace_late_bound_regions_with_fresh_var(span,
infer::FnCall,
&fn_sig).0;
let fn_sig = fn_sig.subst(self.tcx, substs);
let fn_sig = match self.normalize_associated_types_in_as_infer_ok(span, &fn_sig) {
InferOk { value, obligations: o } => {
obligations.extend(o);
value
}
};
// Register obligations for the parameters. This will include the
// `Self` parameter, which in turn has a bound of the main trait,
// so this also effectively registers `obligation` as well. (We
// used to register `obligation` explicitly, but that resulted in
// double error messages being reported.)
//
// Note that as the method comes from a trait, it should not have
// any late-bound regions appearing in its bounds.
let bounds = self.tcx.predicates_of(def_id).instantiate(self.tcx, substs);
let bounds = match self.normalize_associated_types_in_as_infer_ok(span, &bounds) {
InferOk { value, obligations: o } => {
obligations.extend(o);
value
}
};
assert!(!bounds.has_escaping_regions());
let cause = traits::ObligationCause::misc(span, self.body_id);
obligations.extend(traits::predicates_for_generics(cause.clone(),
self.param_env,
&bounds));
// Also add an obligation for the method type being well-formed.
let method_ty = tcx.mk_fn_ptr(ty::Binder::bind(fn_sig));
debug!("lookup_in_trait_adjusted: matched method method_ty={:?} obligation={:?}",
method_ty,
obligation);
obligations.push(traits::Obligation::new(cause,
self.param_env,
ty::Predicate::WellFormed(method_ty)));
let callee = MethodCallee {
def_id,
substs: trait_ref.substs,
sig: fn_sig,
};
debug!("callee = {:?}", callee);
Some(InferOk {
obligations,
value: callee
})
}
pub fn resolve_ufcs(&self,
span: Span,
method_name: ast::Name,
self_ty: Ty<'tcx>,
expr_id: ast::NodeId)
-> Result<Def, MethodError<'tcx>> {
let mode = probe::Mode::Path;
let pick = self.probe_for_name(span, mode, method_name, IsSuggestion(false),
self_ty, expr_id, ProbeScope::TraitsInScope)?;
if let Some(import_id) = pick.import_id {
let import_def_id = self.tcx.hir.local_def_id(import_id);
debug!("used_trait_import: {:?}", import_def_id);
Lrc::get_mut(&mut self.tables.borrow_mut().used_trait_imports)
.unwrap().insert(import_def_id);
}
let def = pick.item.def();
self.tcx.check_stability(def.def_id(), Some(expr_id), span);
Ok(def)
}
/// Find item with name `item_name` defined in impl/trait `def_id`
/// and return it, or `None`, if no such item was defined there.
pub fn associated_item(&self, def_id: DefId, item_name: ast::Name, ns: Namespace)
-> Option<ty::AssociatedItem> {
self.tcx.associated_items(def_id)
.find(|item| Namespace::from(item.kind) == ns &&
self.tcx.hygienic_eq(item_name, item.name, def_id))
}
}
| 40.043038 | 98 | 0.542644 |
383bbdc006e317b3d179896677f4abfde2926ce1 | 553 | use crate::aliases::{TMat3, TMat4, TVec3};
use crate::RealNumber;
/// Builds a 3x3 matrix `m` such that for any `v`: `m * v == cross(x, v)`.
///
/// # See also:
///
/// * [`matrix_cross`](fn.matrix_cross.html)
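///
/// # Example
///
/// A sketch of the defining identity (`ignore`d: exact float equality is not
/// guaranteed, so a real test would compare with a tolerance):
///
/// ```ignore
/// let x = glm::vec3(1.0f32, 2.0, 3.0);
/// let v = glm::vec3(4.0f32, 5.0, 6.0);
/// assert_eq!(glm::matrix_cross3(&x) * v, x.cross(&v));
/// ```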
pub fn matrix_cross3<T: RealNumber>(x: &TVec3<T>) -> TMat3<T> {
x.cross_matrix()
}
/// Builds a 4x4 matrix `m` such that for any `v`: `m * v == cross(x, v)`.
///
/// # See also:
///
/// * [`matrix_cross3`](fn.matrix_cross3.html)
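///
/// The 3x3 cross matrix of `x` ends up in the upper-left block of the
/// returned 4x4 matrix (a sketch, assuming the usual `glm` conversion
/// helpers):
///
/// ```ignore
/// let x = glm::vec3(1.0f32, 2.0, 3.0);
/// assert_eq!(glm::mat4_to_mat3(&glm::matrix_cross(&x)), glm::matrix_cross3(&x));
/// ```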
pub fn matrix_cross<T: RealNumber>(x: &TVec3<T>) -> TMat4<T> {
crate::mat3_to_mat4(&x.cross_matrix())
}
| 26.333333 | 74 | 0.600362 |
1e15eb41bdc62f2c7ba6fccb19e5b8e2dd0ab2e5 | 817 | use crate::primitive::Primitive;
use crate::PancursesRenderer;
use iced_native::widget::slider;
use iced_native::{Point, Rectangle};
use std::ops::RangeInclusive;
impl slider::Renderer for PancursesRenderer {
fn height(&self) -> u32 {
1
}
fn draw(
&mut self,
bounds: Rectangle,
_cursor_position: Point,
range: RangeInclusive<f32>,
value: f32,
_is_dragging: bool,
) -> Primitive {
let (range_start, range_end) = range.into_inner();
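        // Normalize `value` within the range, then scale by the widget width
        // to get the marker's column; the `.max(1.0)` clamps the denominator
        // so a zero-width range cannot cause a division by zero.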
let marker_offset =
bounds.width * ((value - range_start) / (range_end - range_start).max(1.0));
Primitive::Group(vec![
Primitive::BoxDisplay(bounds),
Primitive::Char(bounds.x as i32 + marker_offset as i32, bounds.y as i32, 'x'),
])
}
}
| 25.53125 | 90 | 0.599755 |
5bc5222f9fc15666e479f38be15802fb9668df32 | 117,497 | //! Name resolution for lifetimes.
//!
//! Name resolution for lifetimes follows *much* simpler rules than the
//! full resolve. For example, lifetime names are never exported or
//! used between functions, and they operate in a purely top-down
//! way. Therefore, we break lifetime name resolution into a separate pass.
use crate::diagnostics::{ForLifetimeSpanType, MissingLifetimeSpot};
use rustc::hir::map::Map;
use rustc::lint;
use rustc::middle::resolve_lifetime::*;
use rustc::ty::{self, DefIdTree, GenericParamDefKind, TyCtxt};
use rustc::{bug, span_bug};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LOCAL_CRATE};
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_hir::{GenericArg, GenericParam, LifetimeName, Node, ParamName, QPath};
use rustc_hir::{GenericParamKind, HirIdMap, HirIdSet, LifetimeParamKind};
use rustc_span::symbol::{kw, sym};
use rustc_span::Span;
use std::borrow::Cow;
use std::cell::Cell;
use std::mem::{replace, take};
use syntax::ast;
use syntax::attr;
use syntax::walk_list;
use log::debug;
// This counts the number of times a lifetime is used.
#[derive(Clone, Copy, Debug)]
pub enum LifetimeUseSet<'tcx> {
One(&'tcx hir::Lifetime),
Many,
}
trait RegionExt {
fn early(hir_map: &Map<'_>, index: &mut u32, param: &GenericParam<'_>) -> (ParamName, Region);
fn late(hir_map: &Map<'_>, param: &GenericParam<'_>) -> (ParamName, Region);
fn late_anon(index: &Cell<u32>) -> Region;
fn id(&self) -> Option<DefId>;
fn shifted(self, amount: u32) -> Region;
fn shifted_out_to_binder(self, binder: ty::DebruijnIndex) -> Region;
fn subst<'a, L>(self, params: L, map: &NamedRegionMap) -> Option<Region>
where
L: Iterator<Item = &'a hir::Lifetime>;
}
impl RegionExt for Region {
fn early(hir_map: &Map<'_>, index: &mut u32, param: &GenericParam<'_>) -> (ParamName, Region) {
let i = *index;
*index += 1;
let def_id = hir_map.local_def_id(param.hir_id);
let origin = LifetimeDefOrigin::from_param(param);
debug!("Region::early: index={} def_id={:?}", i, def_id);
(param.name.modern(), Region::EarlyBound(i, def_id, origin))
}
fn late(hir_map: &Map<'_>, param: &GenericParam<'_>) -> (ParamName, Region) {
let depth = ty::INNERMOST;
let def_id = hir_map.local_def_id(param.hir_id);
let origin = LifetimeDefOrigin::from_param(param);
debug!(
"Region::late: param={:?} depth={:?} def_id={:?} origin={:?}",
param, depth, def_id, origin,
);
(param.name.modern(), Region::LateBound(depth, def_id, origin))
}
fn late_anon(index: &Cell<u32>) -> Region {
let i = index.get();
index.set(i + 1);
let depth = ty::INNERMOST;
Region::LateBoundAnon(depth, i)
}
fn id(&self) -> Option<DefId> {
match *self {
Region::Static | Region::LateBoundAnon(..) => None,
Region::EarlyBound(_, id, _) | Region::LateBound(_, id, _) | Region::Free(_, id) => {
Some(id)
}
}
}
fn shifted(self, amount: u32) -> Region {
match self {
Region::LateBound(debruijn, id, origin) => {
Region::LateBound(debruijn.shifted_in(amount), id, origin)
}
Region::LateBoundAnon(debruijn, index) => {
Region::LateBoundAnon(debruijn.shifted_in(amount), index)
}
_ => self,
}
}
fn shifted_out_to_binder(self, binder: ty::DebruijnIndex) -> Region {
match self {
Region::LateBound(debruijn, id, origin) => {
Region::LateBound(debruijn.shifted_out_to_binder(binder), id, origin)
}
Region::LateBoundAnon(debruijn, index) => {
Region::LateBoundAnon(debruijn.shifted_out_to_binder(binder), index)
}
_ => self,
}
}
fn subst<'a, L>(self, mut params: L, map: &NamedRegionMap) -> Option<Region>
where
L: Iterator<Item = &'a hir::Lifetime>,
{
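        // An early-bound region is referenced by its positional index: pick
        // the `index`-th lifetime argument and resolve it through the `defs`
        // map. Every other kind of region is returned unchanged.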
if let Region::EarlyBound(index, _, _) = self {
params.nth(index as usize).and_then(|lifetime| map.defs.get(&lifetime.hir_id).cloned())
} else {
Some(self)
}
}
}
/// Maps the id of each lifetime reference to the lifetime decl
/// that it corresponds to.
///
/// FIXME. This struct gets converted to a `ResolveLifetimes` for
/// actual use. It has the same data, but indexed by `DefIndex`. This
/// is silly.
#[derive(Default)]
struct NamedRegionMap {
// maps from every use of a named (not anonymous) lifetime to a
// `Region` describing how that region is bound
defs: HirIdMap<Region>,
// the set of lifetime def ids that are late-bound; a region can
// be late-bound if (a) it does NOT appear in a where-clause and
// (b) it DOES appear in the arguments.
late_bound: HirIdSet,
// For each type and trait definition, maps type parameters
// to the trait object lifetime defaults computed from them.
object_lifetime_defaults: HirIdMap<Vec<ObjectLifetimeDefault>>,
}
crate struct LifetimeContext<'a, 'tcx> {
crate tcx: TyCtxt<'tcx>,
map: &'a mut NamedRegionMap,
scope: ScopeRef<'a>,
/// This is slightly complicated. Our representation for poly-trait-refs contains a single
/// binder and thus we only allow a single level of quantification. However,
/// the syntax of Rust permits quantification in two places, e.g., `T: for <'a> Foo<'a>`
/// and `for <'a, 'b> &'b T: Foo<'a>`. In order to get the De Bruijn indices
/// correct when representing these constraints, we should only introduce one
/// scope. However, we want to support both locations for the quantifier and
/// during lifetime resolution we want precise information (so we can't
/// desugar in an earlier phase).
///
/// So, if we encounter a quantifier at the outer scope, we set
/// `trait_ref_hack` to `true` (and introduce a scope), and then if we encounter
/// a quantifier at the inner scope, we error. If `trait_ref_hack` is `false`,
/// then we introduce the scope at the inner quantifier.
trait_ref_hack: bool,
/// Used to disallow the use of in-band lifetimes in `fn` or `Fn` syntax.
is_in_fn_syntax: bool,
/// List of labels in the function/method currently under analysis.
labels_in_fn: Vec<ast::Ident>,
/// Cache for cross-crate per-definition object lifetime defaults.
xcrate_object_lifetime_defaults: DefIdMap<Vec<ObjectLifetimeDefault>>,
lifetime_uses: &'a mut DefIdMap<LifetimeUseSet<'tcx>>,
/// When encountering an undefined named lifetime, we will suggest introducing it in these
/// places.
crate missing_named_lifetime_spots: Vec<MissingLifetimeSpot<'tcx>>,
}
#[derive(Debug)]
enum Scope<'a> {
/// Declares lifetimes, and each can be early-bound or late-bound.
/// The `DebruijnIndex` of late-bound lifetimes starts at `1` and
/// it should be shifted by the number of `Binder`s in between the
/// declaration `Binder` and the location it's referenced from.
Binder {
lifetimes: FxHashMap<hir::ParamName, Region>,
/// if we extend this scope with another scope, what is the next index
/// we should use for an early-bound region?
next_early_index: u32,
/// Flag is set to true if, in this binder, `'_` would be
/// equivalent to a "single-use region". This is true on
/// impls, but not other kinds of items.
track_lifetime_uses: bool,
/// Whether or not this binder would serve as the parent
/// binder for opaque types introduced within. For example:
///
/// ```text
/// fn foo<'a>() -> impl for<'b> Trait<Item = impl Trait2<'a>>
/// ```
///
/// Here, the opaque types we create for the `impl Trait`
/// and `impl Trait2` references will both have the `foo` item
/// as their parent. When we get to `impl Trait2`, we find
/// that it is nested within the `for<>` binder -- this flag
/// allows us to skip that when looking for the parent binder
/// of the resulting opaque type.
opaque_type_parent: bool,
s: ScopeRef<'a>,
},
/// Lifetimes introduced by a fn are scoped to the call-site for that fn,
/// if this is a fn body, otherwise the original definitions are used.
/// Unspecified lifetimes are inferred, unless an elision scope is nested,
/// e.g., `(&T, fn(&T) -> &T);` becomes `(&'_ T, for<'a> fn(&'a T) -> &'a T)`.
Body {
id: hir::BodyId,
s: ScopeRef<'a>,
},
/// A scope which either determines unspecified lifetimes or errors
/// on them (e.g., due to ambiguity). For more details, see `Elide`.
Elision {
elide: Elide,
s: ScopeRef<'a>,
},
/// Use a specific lifetime (if `Some`) or leave it unset (to be
/// inferred in a function body or potentially error outside one),
/// for the default choice of lifetime in a trait object type.
ObjectLifetimeDefault {
lifetime: Option<Region>,
s: ScopeRef<'a>,
},
Root,
}
#[derive(Clone, Debug)]
enum Elide {
/// Use a fresh anonymous late-bound lifetime each time, by
/// incrementing the counter to generate sequential indices.
FreshLateAnon(Cell<u32>),
/// Always use this one lifetime.
Exact(Region),
/// Less or more than one lifetime were found, error on unspecified.
Error(Vec<ElisionFailureInfo>),
}
#[derive(Clone, Debug)]
crate struct ElisionFailureInfo {
/// Where we can find the argument pattern.
parent: Option<hir::BodyId>,
/// The index of the argument in the original definition.
index: usize,
lifetime_count: usize,
have_bound_regions: bool,
crate span: Span,
}
type ScopeRef<'a> = &'a Scope<'a>;
const ROOT_SCOPE: ScopeRef<'static> = &Scope::Root;
pub fn provide(providers: &mut ty::query::Providers<'_>) {
*providers = ty::query::Providers {
resolve_lifetimes,
named_region_map: |tcx, id| {
let id = LocalDefId::from_def_id(DefId::local(id)); // (*)
tcx.resolve_lifetimes(LOCAL_CRATE).defs.get(&id)
},
is_late_bound_map: |tcx, id| {
let id = LocalDefId::from_def_id(DefId::local(id)); // (*)
tcx.resolve_lifetimes(LOCAL_CRATE).late_bound.get(&id)
},
object_lifetime_defaults_map: |tcx, id| {
let id = LocalDefId::from_def_id(DefId::local(id)); // (*)
tcx.resolve_lifetimes(LOCAL_CRATE).object_lifetime_defaults.get(&id)
},
..*providers
};
// (*) FIXME the query should be defined to take a LocalDefId
}
/// Computes the `ResolveLifetimes` map that contains data for the
/// entire crate. You should not read the result of this query
/// directly, but rather use `named_region_map`, `is_late_bound_map`,
/// etc.
fn resolve_lifetimes(tcx: TyCtxt<'_>, for_krate: CrateNum) -> &ResolveLifetimes {
assert_eq!(for_krate, LOCAL_CRATE);
let named_region_map = krate(tcx);
let mut rl = ResolveLifetimes::default();
for (hir_id, v) in named_region_map.defs {
let map = rl.defs.entry(hir_id.owner_local_def_id()).or_default();
map.insert(hir_id.local_id, v);
}
for hir_id in named_region_map.late_bound {
let map = rl.late_bound.entry(hir_id.owner_local_def_id()).or_default();
map.insert(hir_id.local_id);
}
for (hir_id, v) in named_region_map.object_lifetime_defaults {
let map = rl.object_lifetime_defaults.entry(hir_id.owner_local_def_id()).or_default();
map.insert(hir_id.local_id, v);
}
tcx.arena.alloc(rl)
}
fn krate(tcx: TyCtxt<'_>) -> NamedRegionMap {
let krate = tcx.hir().krate();
let mut map = NamedRegionMap {
defs: Default::default(),
late_bound: Default::default(),
object_lifetime_defaults: compute_object_lifetime_defaults(tcx),
};
{
let mut visitor = LifetimeContext {
tcx,
map: &mut map,
scope: ROOT_SCOPE,
trait_ref_hack: false,
is_in_fn_syntax: false,
labels_in_fn: vec![],
xcrate_object_lifetime_defaults: Default::default(),
lifetime_uses: &mut Default::default(),
missing_named_lifetime_spots: vec![],
};
for (_, item) in &krate.items {
visitor.visit_item(item);
}
}
map
}
/// In traits, there is an implicit `Self` type parameter which comes before the generics.
/// We have to account for this when computing the index of the other generic parameters.
/// This function returns whether there is such an implicit parameter defined on the given item.
fn sub_items_have_self_param(node: &hir::ItemKind<'_>) -> bool {
match *node {
hir::ItemKind::Trait(..) | hir::ItemKind::TraitAlias(..) => true,
_ => false,
}
}
impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> {
type Map = Map<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<'_, Self::Map> {
NestedVisitorMap::All(&self.tcx.hir())
}
// We want to nest trait/impl items in their parent, but nothing else.
fn visit_nested_item(&mut self, _: hir::ItemId) {}
fn visit_nested_body(&mut self, body: hir::BodyId) {
// Each body has their own set of labels, save labels.
let saved = take(&mut self.labels_in_fn);
let body = self.tcx.hir().body(body);
extract_labels(self, body);
self.with(Scope::Body { id: body.id(), s: self.scope }, |_, this| {
this.visit_body(body);
});
replace(&mut self.labels_in_fn, saved);
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
match item.kind {
hir::ItemKind::Fn(ref sig, ref generics, _) => {
self.missing_named_lifetime_spots.push(generics.into());
self.visit_early_late(None, &sig.decl, generics, |this| {
intravisit::walk_item(this, item);
});
self.missing_named_lifetime_spots.pop();
}
hir::ItemKind::ExternCrate(_)
| hir::ItemKind::Use(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::ForeignMod(..)
| hir::ItemKind::GlobalAsm(..) => {
// These sorts of items have no lifetime parameters at all.
intravisit::walk_item(self, item);
}
hir::ItemKind::Static(..) | hir::ItemKind::Const(..) => {
// No lifetime parameters, but implied 'static.
let scope = Scope::Elision { elide: Elide::Exact(Region::Static), s: ROOT_SCOPE };
self.with(scope, |_, this| intravisit::walk_item(this, item));
}
hir::ItemKind::OpaqueTy(hir::OpaqueTy { impl_trait_fn: Some(_), .. }) => {
// Currently opaque type declarations are just generated from `impl Trait`
// items. Doing anything on this node is irrelevant, as we currently don't need
// it.
}
hir::ItemKind::TyAlias(_, ref generics)
| hir::ItemKind::OpaqueTy(hir::OpaqueTy {
impl_trait_fn: None, ref generics, ..
})
| hir::ItemKind::Enum(_, ref generics)
| hir::ItemKind::Struct(_, ref generics)
| hir::ItemKind::Union(_, ref generics)
| hir::ItemKind::Trait(_, _, ref generics, ..)
| hir::ItemKind::TraitAlias(ref generics, ..)
| hir::ItemKind::Impl { ref generics, .. } => {
self.missing_named_lifetime_spots.push(generics.into());
// Impls permit `'_` to be used and it is equivalent to "some fresh lifetime name".
                // This is not true for other kinds of items.
let track_lifetime_uses = match item.kind {
hir::ItemKind::Impl { .. } => true,
_ => false,
};
// These kinds of items have only early-bound lifetime parameters.
let mut index = if sub_items_have_self_param(&item.kind) {
1 // Self comes before lifetimes
} else {
0
};
let mut non_lifetime_count = 0;
let lifetimes = generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::early(&self.tcx.hir(), &mut index, param))
}
GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
non_lifetime_count += 1;
None
}
})
.collect();
let scope = Scope::Binder {
lifetimes,
next_early_index: index + non_lifetime_count,
opaque_type_parent: true,
track_lifetime_uses,
s: ROOT_SCOPE,
};
self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &generics.params);
intravisit::walk_item(this, item);
});
self.missing_named_lifetime_spots.pop();
}
}
}
fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
match item.kind {
hir::ForeignItemKind::Fn(ref decl, _, ref generics) => {
self.visit_early_late(None, decl, generics, |this| {
intravisit::walk_foreign_item(this, item);
})
}
hir::ForeignItemKind::Static(..) => {
intravisit::walk_foreign_item(self, item);
}
hir::ForeignItemKind::Type => {
intravisit::walk_foreign_item(self, item);
}
}
}
fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
debug!("visit_ty: id={:?} ty={:?}", ty.hir_id, ty);
debug!("visit_ty: ty.kind={:?}", ty.kind);
match ty.kind {
hir::TyKind::BareFn(ref c) => {
let next_early_index = self.next_early_index();
let was_in_fn_syntax = self.is_in_fn_syntax;
self.is_in_fn_syntax = true;
let lifetime_span: Option<Span> = c
.generic_params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => Some(param.span),
_ => None,
})
.last();
let (span, span_type) = if let Some(span) = lifetime_span {
(span.shrink_to_hi(), ForLifetimeSpanType::TypeTail)
} else {
(ty.span.shrink_to_lo(), ForLifetimeSpanType::TypeEmpty)
};
self.missing_named_lifetime_spots
.push(MissingLifetimeSpot::HigherRanked { span, span_type });
let scope = Scope::Binder {
lifetimes: c
.generic_params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::late(&self.tcx.hir(), param))
}
_ => None,
})
.collect(),
s: self.scope,
next_early_index,
track_lifetime_uses: true,
opaque_type_parent: false,
};
self.with(scope, |old_scope, this| {
// a bare fn has no bounds, so everything
// contained within is scoped within its binder.
this.check_lifetime_params(old_scope, &c.generic_params);
intravisit::walk_ty(this, ty);
});
self.missing_named_lifetime_spots.pop();
self.is_in_fn_syntax = was_in_fn_syntax;
}
hir::TyKind::TraitObject(bounds, ref lifetime) => {
debug!("visit_ty: TraitObject(bounds={:?}, lifetime={:?})", bounds, lifetime);
for bound in bounds {
self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
}
match lifetime.name {
LifetimeName::Implicit => {
// For types like `dyn Foo`, we should
// generate a special form of elided.
                    span_bug!(ty.span, "object-lifetime-default expected, not implicit",);
}
LifetimeName::ImplicitObjectLifetimeDefault => {
// If the user does not write *anything*, we
// use the object lifetime defaulting
// rules. So e.g., `Box<dyn Debug>` becomes
// `Box<dyn Debug + 'static>`.
self.resolve_object_lifetime_default(lifetime)
}
LifetimeName::Underscore => {
// If the user writes `'_`, we use the *ordinary* elision
// rules. So the `'_` in e.g., `Box<dyn Debug + '_>` will be
// resolved the same as the `'_` in `&'_ Foo`.
//
// cc #48468
self.resolve_elided_lifetimes(vec![lifetime])
}
LifetimeName::Param(_) | LifetimeName::Static => {
// If the user wrote an explicit name, use that.
self.visit_lifetime(lifetime);
}
LifetimeName::Error => {}
}
}
hir::TyKind::Rptr(ref lifetime_ref, ref mt) => {
self.visit_lifetime(lifetime_ref);
let scope = Scope::ObjectLifetimeDefault {
lifetime: self.map.defs.get(&lifetime_ref.hir_id).cloned(),
s: self.scope,
};
self.with(scope, |_, this| this.visit_ty(&mt.ty));
}
hir::TyKind::Def(item_id, lifetimes) => {
// Resolve the lifetimes in the bounds to the lifetime defs in the generics.
// `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
// `type MyAnonTy<'b> = impl MyTrait<'b>;`
// ^ ^ this gets resolved in the scope of
// the opaque_ty generics
let (generics, bounds) = match self.tcx.hir().expect_item(item_id.id).kind {
// Named opaque `impl Trait` types are reached via `TyKind::Path`.
// This arm is for `impl Trait` in the types of statics, constants and locals.
hir::ItemKind::OpaqueTy(hir::OpaqueTy { impl_trait_fn: None, .. }) => {
intravisit::walk_ty(self, ty);
return;
}
// RPIT (return position impl trait)
hir::ItemKind::OpaqueTy(hir::OpaqueTy { ref generics, bounds, .. }) => {
(generics, bounds)
}
ref i => bug!("`impl Trait` pointed to non-opaque type?? {:#?}", i),
};
// Resolve the lifetimes that are applied to the opaque type.
// These are resolved in the current scope.
// `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
// `fn foo<'a>() -> MyAnonTy<'a> { ... }`
// ^ ^this gets resolved in the current scope
for lifetime in lifetimes {
if let hir::GenericArg::Lifetime(lifetime) = lifetime {
self.visit_lifetime(lifetime);
// Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>`
// and ban them. Type variables instantiated inside binders aren't
// well-supported at the moment, so this doesn't work.
// In the future, this should be fixed and this error should be removed.
let def = self.map.defs.get(&lifetime.hir_id).cloned();
if let Some(Region::LateBound(_, def_id, _)) = def {
if let Some(hir_id) = self.tcx.hir().as_local_hir_id(def_id) {
// Ensure that the parent of the def is an item, not HRTB
let parent_id = self.tcx.hir().get_parent_node(hir_id);
let parent_impl_id = hir::ImplItemId { hir_id: parent_id };
let parent_trait_id = hir::TraitItemId { hir_id: parent_id };
let krate = self.tcx.hir().krate();
if !(krate.items.contains_key(&parent_id)
|| krate.impl_items.contains_key(&parent_impl_id)
|| krate.trait_items.contains_key(&parent_trait_id))
{
struct_span_err!(
self.tcx.sess,
lifetime.span,
E0657,
"`impl Trait` can only capture lifetimes \
bound at the fn or impl level"
)
.emit();
self.uninsert_lifetime_on_error(lifetime, def.unwrap());
}
}
}
}
}
// We want to start our early-bound indices at the end of the parent scope,
// not including any parent `impl Trait`s.
let mut index = self.next_early_index_for_opaque_type();
debug!("visit_ty: index = {}", index);
let mut elision = None;
let mut lifetimes = FxHashMap::default();
let mut non_lifetime_count = 0;
for param in generics.params {
match param.kind {
GenericParamKind::Lifetime { .. } => {
let (name, reg) = Region::early(&self.tcx.hir(), &mut index, ¶m);
let def_id = if let Region::EarlyBound(_, def_id, _) = reg {
def_id
} else {
bug!();
};
if let hir::ParamName::Plain(param_name) = name {
if param_name.name == kw::UnderscoreLifetime {
// Pick the elided lifetime "definition" if one exists
// and use it to make an elision scope.
                                self.lifetime_uses.insert(def_id, LifetimeUseSet::Many);
elision = Some(reg);
} else {
lifetimes.insert(name, reg);
}
} else {
                            self.lifetime_uses.insert(def_id, LifetimeUseSet::Many);
lifetimes.insert(name, reg);
}
}
GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
non_lifetime_count += 1;
}
}
}
let next_early_index = index + non_lifetime_count;
if let Some(elision_region) = elision {
let scope =
Scope::Elision { elide: Elide::Exact(elision_region), s: self.scope };
self.with(scope, |_old_scope, this| {
let scope = Scope::Binder {
lifetimes,
next_early_index,
s: this.scope,
track_lifetime_uses: true,
opaque_type_parent: false,
};
this.with(scope, |_old_scope, this| {
this.visit_generics(generics);
for bound in bounds {
this.visit_param_bound(bound);
}
});
});
} else {
let scope = Scope::Binder {
lifetimes,
next_early_index,
s: self.scope,
track_lifetime_uses: true,
opaque_type_parent: false,
};
self.with(scope, |_old_scope, this| {
this.visit_generics(generics);
for bound in bounds {
this.visit_param_bound(bound);
}
});
}
}
_ => intravisit::walk_ty(self, ty),
}
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
use self::hir::TraitItemKind::*;
self.missing_named_lifetime_spots.push((&trait_item.generics).into());
match trait_item.kind {
Method(ref sig, _) => {
let tcx = self.tcx;
self.visit_early_late(
Some(tcx.hir().get_parent_item(trait_item.hir_id)),
&sig.decl,
&trait_item.generics,
|this| intravisit::walk_trait_item(this, trait_item),
);
}
Type(bounds, ref ty) => {
let generics = &trait_item.generics;
let mut index = self.next_early_index();
debug!("visit_ty: index = {}", index);
let mut non_lifetime_count = 0;
let lifetimes = generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::early(&self.tcx.hir(), &mut index, param))
}
GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
non_lifetime_count += 1;
None
}
})
.collect();
let scope = Scope::Binder {
lifetimes,
next_early_index: index + non_lifetime_count,
s: self.scope,
track_lifetime_uses: true,
opaque_type_parent: true,
};
self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &generics.params);
this.visit_generics(generics);
for bound in bounds {
this.visit_param_bound(bound);
}
if let Some(ty) = ty {
this.visit_ty(ty);
}
});
}
Const(_, _) => {
// Only methods and types support generics.
assert!(trait_item.generics.params.is_empty());
intravisit::walk_trait_item(self, trait_item);
}
}
self.missing_named_lifetime_spots.pop();
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
use self::hir::ImplItemKind::*;
self.missing_named_lifetime_spots.push((&impl_item.generics).into());
match impl_item.kind {
Method(ref sig, _) => {
let tcx = self.tcx;
self.visit_early_late(
Some(tcx.hir().get_parent_item(impl_item.hir_id)),
&sig.decl,
&impl_item.generics,
|this| intravisit::walk_impl_item(this, impl_item),
)
}
TyAlias(ref ty) => {
let generics = &impl_item.generics;
let mut index = self.next_early_index();
let mut non_lifetime_count = 0;
debug!("visit_ty: index = {}", index);
let lifetimes = generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::early(&self.tcx.hir(), &mut index, param))
}
GenericParamKind::Const { .. } | GenericParamKind::Type { .. } => {
non_lifetime_count += 1;
None
}
})
.collect();
let scope = Scope::Binder {
lifetimes,
next_early_index: index + non_lifetime_count,
s: self.scope,
track_lifetime_uses: true,
opaque_type_parent: true,
};
self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &generics.params);
this.visit_generics(generics);
this.visit_ty(ty);
});
}
OpaqueTy(bounds) => {
let generics = &impl_item.generics;
let mut index = self.next_early_index();
let mut next_early_index = index;
debug!("visit_ty: index = {}", index);
let lifetimes = generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::early(&self.tcx.hir(), &mut index, param))
}
GenericParamKind::Type { .. } => {
next_early_index += 1;
None
}
GenericParamKind::Const { .. } => {
next_early_index += 1;
None
}
})
.collect();
let scope = Scope::Binder {
lifetimes,
next_early_index,
s: self.scope,
track_lifetime_uses: true,
opaque_type_parent: true,
};
self.with(scope, |_old_scope, this| {
this.visit_generics(generics);
for bound in bounds {
this.visit_param_bound(bound);
}
});
}
Const(_, _) => {
// Only methods and types support generics.
assert!(impl_item.generics.params.is_empty());
intravisit::walk_impl_item(self, impl_item);
}
}
self.missing_named_lifetime_spots.pop();
}
fn visit_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
debug!("visit_lifetime(lifetime_ref={:?})", lifetime_ref);
if lifetime_ref.is_elided() {
self.resolve_elided_lifetimes(vec![lifetime_ref]);
return;
}
if lifetime_ref.is_static() {
self.insert_lifetime(lifetime_ref, Region::Static);
return;
}
self.resolve_lifetime_ref(lifetime_ref);
}
fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, _: hir::HirId) {
for (i, segment) in path.segments.iter().enumerate() {
let depth = path.segments.len() - i - 1;
if let Some(ref args) = segment.args {
self.visit_segment_args(path.res, depth, args);
}
}
}
fn visit_fn_decl(&mut self, fd: &'tcx hir::FnDecl<'tcx>) {
let output = match fd.output {
hir::FunctionRetTy::DefaultReturn(_) => None,
hir::FunctionRetTy::Return(ref ty) => Some(&**ty),
};
self.visit_fn_like_elision(&fd.inputs, output);
}
fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
check_mixed_explicit_and_in_band_defs(self.tcx, &generics.params);
for param in generics.params {
match param.kind {
GenericParamKind::Lifetime { .. } => {}
GenericParamKind::Type { ref default, .. } => {
walk_list!(self, visit_param_bound, param.bounds);
if let Some(ref ty) = default {
self.visit_ty(&ty);
}
}
GenericParamKind::Const { ref ty, .. } => {
walk_list!(self, visit_param_bound, param.bounds);
self.visit_ty(&ty);
}
}
}
for predicate in generics.where_clause.predicates {
match predicate {
&hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
ref bounded_ty,
bounds,
ref bound_generic_params,
..
}) => {
let lifetimes: FxHashMap<_, _> = bound_generic_params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::late(&self.tcx.hir(), param))
}
_ => None,
})
.collect();
if !lifetimes.is_empty() {
self.trait_ref_hack = true;
let next_early_index = self.next_early_index();
let scope = Scope::Binder {
lifetimes,
s: self.scope,
next_early_index,
track_lifetime_uses: true,
opaque_type_parent: false,
};
let result = self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &bound_generic_params);
this.visit_ty(&bounded_ty);
walk_list!(this, visit_param_bound, bounds);
});
self.trait_ref_hack = false;
result
} else {
self.visit_ty(&bounded_ty);
walk_list!(self, visit_param_bound, bounds);
}
}
&hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
ref lifetime,
bounds,
..
}) => {
self.visit_lifetime(lifetime);
walk_list!(self, visit_param_bound, bounds);
}
&hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
ref lhs_ty,
ref rhs_ty,
..
}) => {
self.visit_ty(lhs_ty);
self.visit_ty(rhs_ty);
}
}
}
}
fn visit_poly_trait_ref(
&mut self,
trait_ref: &'tcx hir::PolyTraitRef<'tcx>,
_modifier: hir::TraitBoundModifier,
) {
debug!("visit_poly_trait_ref(trait_ref={:?})", trait_ref);
let should_pop_missing_lt = self.is_trait_ref_fn_scope(trait_ref);
if !self.trait_ref_hack
|| trait_ref.bound_generic_params.iter().any(|param| match param.kind {
GenericParamKind::Lifetime { .. } => true,
_ => false,
})
{
if self.trait_ref_hack {
struct_span_err!(
self.tcx.sess,
trait_ref.span,
E0316,
"nested quantification of lifetimes"
)
.emit();
}
let next_early_index = self.next_early_index();
let scope = Scope::Binder {
lifetimes: trait_ref
.bound_generic_params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
Some(Region::late(&self.tcx.hir(), param))
}
_ => None,
})
.collect(),
s: self.scope,
next_early_index,
track_lifetime_uses: true,
opaque_type_parent: false,
};
self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &trait_ref.bound_generic_params);
walk_list!(this, visit_generic_param, trait_ref.bound_generic_params);
this.visit_trait_ref(&trait_ref.trait_ref);
})
} else {
self.visit_trait_ref(&trait_ref.trait_ref);
}
if should_pop_missing_lt {
self.missing_named_lifetime_spots.pop();
}
}
}
#[derive(Copy, Clone, PartialEq)]
enum ShadowKind {
Label,
Lifetime,
}
struct Original {
kind: ShadowKind,
span: Span,
}
struct Shadower {
kind: ShadowKind,
span: Span,
}
fn original_label(span: Span) -> Original {
Original { kind: ShadowKind::Label, span: span }
}
fn shadower_label(span: Span) -> Shadower {
Shadower { kind: ShadowKind::Label, span: span }
}
fn original_lifetime(span: Span) -> Original {
Original { kind: ShadowKind::Lifetime, span: span }
}
fn shadower_lifetime(param: &hir::GenericParam<'_>) -> Shadower {
Shadower { kind: ShadowKind::Lifetime, span: param.span }
}
impl ShadowKind {
fn desc(&self) -> &'static str {
match *self {
ShadowKind::Label => "label",
ShadowKind::Lifetime => "lifetime",
}
}
}
fn check_mixed_explicit_and_in_band_defs(tcx: TyCtxt<'_>, params: &[hir::GenericParam<'_>]) {
let lifetime_params: Vec<_> = params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { kind, .. } => Some((kind, param.span)),
_ => None,
})
.collect();
let explicit = lifetime_params.iter().find(|(kind, _)| *kind == LifetimeParamKind::Explicit);
let in_band = lifetime_params.iter().find(|(kind, _)| *kind == LifetimeParamKind::InBand);
if let (Some((_, explicit_span)), Some((_, in_band_span))) = (explicit, in_band) {
struct_span_err!(
tcx.sess,
*in_band_span,
E0688,
"cannot mix in-band and explicit lifetime definitions"
)
.span_label(*in_band_span, "in-band lifetime definition here")
.span_label(*explicit_span, "explicit lifetime definition here")
.emit();
}
}
fn signal_shadowing_problem(tcx: TyCtxt<'_>, name: ast::Name, orig: Original, shadower: Shadower) {
let mut err = if let (ShadowKind::Lifetime, ShadowKind::Lifetime) = (orig.kind, shadower.kind) {
// lifetime/lifetime shadowing is an error
struct_span_err!(
tcx.sess,
shadower.span,
E0496,
"{} name `{}` shadows a \
{} name that is already in scope",
shadower.kind.desc(),
name,
orig.kind.desc()
)
} else {
// shadowing involving a label is only a warning, due to issues with
// labels and lifetimes not being macro-hygienic.
tcx.sess.struct_span_warn(
shadower.span,
&format!(
"{} name `{}` shadows a \
{} name that is already in scope",
shadower.kind.desc(),
name,
orig.kind.desc()
),
)
};
err.span_label(orig.span, "first declared here");
err.span_label(shadower.span, format!("lifetime {} already in scope", name));
err.emit();
}
// Adds all labels in `body` to `ctxt.labels_in_fn`, signalling a warning
// if one of the labels shadows a lifetime or another label.
fn extract_labels(ctxt: &mut LifetimeContext<'_, '_>, body: &hir::Body<'_>) {
struct GatherLabels<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
scope: ScopeRef<'a>,
labels_in_fn: &'a mut Vec<ast::Ident>,
}
let mut gather =
GatherLabels { tcx: ctxt.tcx, scope: ctxt.scope, labels_in_fn: &mut ctxt.labels_in_fn };
gather.visit_body(body);
impl<'v, 'a, 'tcx> Visitor<'v> for GatherLabels<'a, 'tcx> {
type Map = Map<'v>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<'_, Self::Map> {
NestedVisitorMap::None
}
fn visit_expr(&mut self, ex: &hir::Expr<'_>) {
if let Some(label) = expression_label(ex) {
for prior_label in &self.labels_in_fn[..] {
// FIXME (#24278): non-hygienic comparison
if label.name == prior_label.name {
signal_shadowing_problem(
self.tcx,
label.name,
original_label(prior_label.span),
shadower_label(label.span),
);
}
}
check_if_label_shadows_lifetime(self.tcx, self.scope, label);
self.labels_in_fn.push(label);
}
intravisit::walk_expr(self, ex)
}
}
fn expression_label(ex: &hir::Expr<'_>) -> Option<ast::Ident> {
if let hir::ExprKind::Loop(_, Some(label), _) = ex.kind { Some(label.ident) } else { None }
}
fn check_if_label_shadows_lifetime(
tcx: TyCtxt<'_>,
mut scope: ScopeRef<'_>,
label: ast::Ident,
) {
loop {
match *scope {
Scope::Body { s, .. }
| Scope::Elision { s, .. }
| Scope::ObjectLifetimeDefault { s, .. } => {
scope = s;
}
Scope::Root => {
return;
}
Scope::Binder { ref lifetimes, s, .. } => {
// FIXME (#24278): non-hygienic comparison
if let Some(def) = lifetimes.get(&hir::ParamName::Plain(label.modern())) {
let hir_id = tcx.hir().as_local_hir_id(def.id().unwrap()).unwrap();
signal_shadowing_problem(
tcx,
label.name,
original_lifetime(tcx.hir().span(hir_id)),
shadower_label(label.span),
);
return;
}
scope = s;
}
}
}
}
}
fn compute_object_lifetime_defaults(tcx: TyCtxt<'_>) -> HirIdMap<Vec<ObjectLifetimeDefault>> {
let mut map = HirIdMap::default();
for item in tcx.hir().krate().items.values() {
match item.kind {
hir::ItemKind::Struct(_, ref generics)
| hir::ItemKind::Union(_, ref generics)
| hir::ItemKind::Enum(_, ref generics)
| hir::ItemKind::OpaqueTy(hir::OpaqueTy {
ref generics, impl_trait_fn: None, ..
})
| hir::ItemKind::TyAlias(_, ref generics)
| hir::ItemKind::Trait(_, _, ref generics, ..) => {
let result = object_lifetime_defaults_for_item(tcx, generics);
// Debugging aid.
if attr::contains_name(&item.attrs, sym::rustc_object_lifetime_default) {
let object_lifetime_default_reprs: String = result
.iter()
.map(|set| match *set {
Set1::Empty => "BaseDefault".into(),
Set1::One(Region::Static) => "'static".into(),
Set1::One(Region::EarlyBound(mut i, _, _)) => generics
.params
.iter()
.find_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
if i == 0 {
return Some(param.name.ident().to_string().into());
}
i -= 1;
None
}
_ => None,
})
.unwrap(),
Set1::One(_) => bug!(),
Set1::Many => "Ambiguous".into(),
})
.collect::<Vec<Cow<'static, str>>>()
.join(",");
tcx.sess.span_err(item.span, &object_lifetime_default_reprs);
}
map.insert(item.hir_id, result);
}
_ => {}
}
}
map
}
/// Scan the bounds and where-clauses on parameters to extract bounds
/// of the form `T:'a` so as to determine the `ObjectLifetimeDefault`
/// for each type parameter.
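///
/// For example (an illustrative case, not taken from the original comments):
/// given `struct Ref<'a, T: 'a>(&'a T);`, the `T: 'a` bound makes the object
/// lifetime default for `T` resolve to `'a`, so `Ref<'x, dyn Trait>` is
/// treated as `Ref<'x, dyn Trait + 'x>`.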
fn object_lifetime_defaults_for_item(
tcx: TyCtxt<'_>,
generics: &hir::Generics<'_>,
) -> Vec<ObjectLifetimeDefault> {
fn add_bounds(set: &mut Set1<hir::LifetimeName>, bounds: &[hir::GenericBound<'_>]) {
for bound in bounds {
if let hir::GenericBound::Outlives(ref lifetime) = *bound {
set.insert(lifetime.name.modern());
}
}
}
generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => None,
GenericParamKind::Type { .. } => {
let mut set = Set1::Empty;
                add_bounds(&mut set, &param.bounds);
let param_def_id = tcx.hir().local_def_id(param.hir_id);
for predicate in generics.where_clause.predicates {
// Look for `type: ...` where clauses.
let data = match *predicate {
hir::WherePredicate::BoundPredicate(ref data) => data,
_ => continue,
};
// Ignore `for<'a> type: ...` as they can change what
// lifetimes mean (although we could "just" handle it).
if !data.bound_generic_params.is_empty() {
continue;
}
let res = match data.bounded_ty.kind {
hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => path.res,
_ => continue,
};
if res == Res::Def(DefKind::TyParam, param_def_id) {
add_bounds(&mut set, &data.bounds);
}
}
Some(match set {
Set1::Empty => Set1::Empty,
Set1::One(name) => {
if name == hir::LifetimeName::Static {
Set1::One(Region::Static)
} else {
generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => Some((
param.hir_id,
hir::LifetimeName::Param(param.name),
LifetimeDefOrigin::from_param(param),
)),
_ => None,
})
.enumerate()
.find(|&(_, (_, lt_name, _))| lt_name == name)
.map_or(Set1::Many, |(i, (id, _, origin))| {
let def_id = tcx.hir().local_def_id(id);
Set1::One(Region::EarlyBound(i as u32, def_id, origin))
})
}
}
Set1::Many => Set1::Many,
})
}
GenericParamKind::Const { .. } => {
// Generic consts don't impose any constraints.
None
}
})
.collect()
}
impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
// FIXME(#37666) this works around a limitation in the region inferencer
fn hack<F>(&mut self, f: F)
where
F: for<'b> FnOnce(&mut LifetimeContext<'b, 'tcx>),
{
f(self)
}
fn with<F>(&mut self, wrap_scope: Scope<'_>, f: F)
where
F: for<'b> FnOnce(ScopeRef<'_>, &mut LifetimeContext<'b, 'tcx>),
{
let LifetimeContext { tcx, map, lifetime_uses, .. } = self;
let labels_in_fn = take(&mut self.labels_in_fn);
let xcrate_object_lifetime_defaults = take(&mut self.xcrate_object_lifetime_defaults);
let missing_named_lifetime_spots = take(&mut self.missing_named_lifetime_spots);
let mut this = LifetimeContext {
tcx: *tcx,
map: map,
scope: &wrap_scope,
trait_ref_hack: self.trait_ref_hack,
is_in_fn_syntax: self.is_in_fn_syntax,
labels_in_fn,
xcrate_object_lifetime_defaults,
lifetime_uses,
missing_named_lifetime_spots,
};
debug!("entering scope {:?}", this.scope);
f(self.scope, &mut this);
this.check_uses_for_lifetimes_defined_by_scope();
debug!("exiting scope {:?}", this.scope);
self.labels_in_fn = this.labels_in_fn;
self.xcrate_object_lifetime_defaults = this.xcrate_object_lifetime_defaults;
self.missing_named_lifetime_spots = this.missing_named_lifetime_spots;
}
/// helper method to determine the span to remove when suggesting the
/// deletion of a lifetime
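    ///
    /// For example (an illustrative sketch of the intent): for
    /// `fn f<'a>(x: &'a u32)` the whole `<'a>` bracket span is returned, while
    /// for `fn f<'a, 'b>(..)` only `'a` plus its neighboring comma is removed.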
fn lifetime_deletion_span(
&self,
name: ast::Ident,
generics: &hir::Generics<'_>,
) -> Option<Span> {
generics.params.iter().enumerate().find_map(|(i, param)| {
if param.name.ident() == name {
let mut in_band = false;
if let hir::GenericParamKind::Lifetime { kind } = param.kind {
if let hir::LifetimeParamKind::InBand = kind {
in_band = true;
}
}
if in_band {
Some(param.span)
} else {
if generics.params.len() == 1 {
// if sole lifetime, remove the entire `<>` brackets
Some(generics.span)
} else {
// if removing within `<>` brackets, we also want to
// delete a leading or trailing comma as appropriate
if i >= generics.params.len() - 1 {
Some(generics.params[i - 1].span.shrink_to_hi().to(param.span))
} else {
Some(param.span.to(generics.params[i + 1].span.shrink_to_lo()))
}
}
}
} else {
None
}
})
}
// helper method to issue suggestions from `fn rah<'a>(&'a T)` to `fn rah(&T)`
// or from `fn rah<'a>(T<'a>)` to `fn rah(T<'_>)`
fn suggest_eliding_single_use_lifetime(
&self,
err: &mut DiagnosticBuilder<'_>,
def_id: DefId,
lifetime: &hir::Lifetime,
) {
let name = lifetime.name.ident();
let mut remove_decl = None;
if let Some(parent_def_id) = self.tcx.parent(def_id) {
if let Some(generics) = self.tcx.hir().get_generics(parent_def_id) {
remove_decl = self.lifetime_deletion_span(name, generics);
}
}
let mut remove_use = None;
let mut elide_use = None;
let mut find_arg_use_span = |inputs: &[hir::Ty<'_>]| {
for input in inputs {
match input.kind {
hir::TyKind::Rptr(lt, _) => {
if lt.name.ident() == name {
// include the trailing whitespace between the lifetime and type names
let lt_through_ty_span = lifetime.span.to(input.span.shrink_to_hi());
remove_use = Some(
self.tcx
.sess
.source_map()
.span_until_non_whitespace(lt_through_ty_span),
);
break;
}
}
hir::TyKind::Path(ref qpath) => {
if let QPath::Resolved(_, path) = qpath {
let last_segment = &path.segments[path.segments.len() - 1];
let generics = last_segment.generic_args();
for arg in generics.args.iter() {
if let GenericArg::Lifetime(lt) = arg {
if lt.name.ident() == name {
elide_use = Some(lt.span);
break;
}
}
}
break;
}
}
_ => {}
}
}
};
if let Node::Lifetime(hir_lifetime) = self.tcx.hir().get(lifetime.hir_id) {
if let Some(parent) =
self.tcx.hir().find(self.tcx.hir().get_parent_item(hir_lifetime.hir_id))
{
match parent {
Node::Item(item) => {
if let hir::ItemKind::Fn(sig, _, _) = &item.kind {
find_arg_use_span(sig.decl.inputs);
}
}
Node::ImplItem(impl_item) => {
if let hir::ImplItemKind::Method(sig, _) = &impl_item.kind {
find_arg_use_span(sig.decl.inputs);
}
}
_ => {}
}
}
}
let msg = "elide the single-use lifetime";
match (remove_decl, remove_use, elide_use) {
(Some(decl_span), Some(use_span), None) => {
// if both declaration and use deletion spans start at the same
// place ("start at" because the latter includes trailing
// whitespace), then this is an in-band lifetime
if decl_span.shrink_to_lo() == use_span.shrink_to_lo() {
err.span_suggestion(
use_span,
msg,
String::new(),
Applicability::MachineApplicable,
);
} else {
err.multipart_suggestion(
msg,
vec![(decl_span, String::new()), (use_span, String::new())],
Applicability::MachineApplicable,
);
}
}
(Some(decl_span), None, Some(use_span)) => {
err.multipart_suggestion(
msg,
vec![(decl_span, String::new()), (use_span, "'_".to_owned())],
Applicability::MachineApplicable,
);
}
_ => {}
}
}
fn check_uses_for_lifetimes_defined_by_scope(&mut self) {
let defined_by = match self.scope {
Scope::Binder { lifetimes, .. } => lifetimes,
_ => {
debug!("check_uses_for_lifetimes_defined_by_scope: not in a binder scope");
return;
}
};
let mut def_ids: Vec<_> = defined_by
.values()
.flat_map(|region| match region {
Region::EarlyBound(_, def_id, _)
| Region::LateBound(_, def_id, _)
| Region::Free(_, def_id) => Some(*def_id),
Region::LateBoundAnon(..) | Region::Static => None,
})
.collect();
// ensure that we issue lints in a repeatable order
def_ids.sort_by_cached_key(|&def_id| self.tcx.def_path_hash(def_id));
for def_id in def_ids {
debug!("check_uses_for_lifetimes_defined_by_scope: def_id = {:?}", def_id);
let lifetimeuseset = self.lifetime_uses.remove(&def_id);
debug!(
"check_uses_for_lifetimes_defined_by_scope: lifetimeuseset = {:?}",
lifetimeuseset
);
match lifetimeuseset {
Some(LifetimeUseSet::One(lifetime)) => {
let hir_id = self.tcx.hir().as_local_hir_id(def_id).unwrap();
debug!("hir id first={:?}", hir_id);
if let Some((id, span, name)) = match self.tcx.hir().get(hir_id) {
Node::Lifetime(hir_lifetime) => Some((
hir_lifetime.hir_id,
hir_lifetime.span,
hir_lifetime.name.ident(),
)),
Node::GenericParam(param) => {
Some((param.hir_id, param.span, param.name.ident()))
}
_ => None,
} {
debug!("id = {:?} span = {:?} name = {:?}", id, span, name);
if name.name == kw::UnderscoreLifetime {
continue;
}
if let Some(parent_def_id) = self.tcx.parent(def_id) {
if let Some(parent_hir_id) =
self.tcx.hir().as_local_hir_id(parent_def_id)
{
// lifetimes in `derive` expansions don't count (Issue #53738)
if self
.tcx
.hir()
.attrs(parent_hir_id)
.iter()
.any(|attr| attr.check_name(sym::automatically_derived))
{
continue;
}
}
}
self.tcx.struct_span_lint_hir(
lint::builtin::SINGLE_USE_LIFETIMES,
id,
span,
|lint| {
let mut err = lint.build(&format!(
"lifetime parameter `{}` only used once",
name
));
if span == lifetime.span {
// spans are the same for in-band lifetime declarations
err.span_label(span, "this lifetime is only used here");
} else {
err.span_label(span, "this lifetime...");
err.span_label(lifetime.span, "...is used only here");
}
self.suggest_eliding_single_use_lifetime(
&mut err, def_id, lifetime,
);
err.emit();
},
);
}
}
Some(LifetimeUseSet::Many) => {
debug!("not one use lifetime");
}
None => {
let hir_id = self.tcx.hir().as_local_hir_id(def_id).unwrap();
if let Some((id, span, name)) = match self.tcx.hir().get(hir_id) {
Node::Lifetime(hir_lifetime) => Some((
hir_lifetime.hir_id,
hir_lifetime.span,
hir_lifetime.name.ident(),
)),
Node::GenericParam(param) => {
Some((param.hir_id, param.span, param.name.ident()))
}
_ => None,
} {
debug!("id ={:?} span = {:?} name = {:?}", id, span, name);
self.tcx.struct_span_lint_hir(
lint::builtin::UNUSED_LIFETIMES,
id,
span,
|lint| {
let mut err = lint
.build(&format!("lifetime parameter `{}` never used", name));
if let Some(parent_def_id) = self.tcx.parent(def_id) {
if let Some(generics) =
self.tcx.hir().get_generics(parent_def_id)
{
let unused_lt_span =
self.lifetime_deletion_span(name, generics);
if let Some(span) = unused_lt_span {
err.span_suggestion(
span,
"elide the unused lifetime",
String::new(),
Applicability::MachineApplicable,
);
}
}
}
err.emit();
},
);
}
}
}
}
}
/// Visits self by adding a scope and handling recursive walk over the contents with `walk`.
///
/// Handles visiting fns and methods. These are a bit complicated because we must distinguish
/// early- vs late-bound lifetime parameters. We do this by checking which lifetimes appear
/// within type bounds; those are early bound lifetimes, and the rest are late bound.
///
/// For example:
///
/// fn foo<'a,'b,'c,T:Trait<'b>>(...)
///
/// Here `'a` and `'c` are late bound but `'b` is early bound. Note that early- and late-bound
/// lifetimes may be interspersed together.
///
/// If early bound lifetimes are present, we separate them into their own list (and likewise
/// for late bound). They will be numbered sequentially, starting from the lowest index that is
/// already in scope (for a fn item, that will be 0, but for a method it might not be). Late
/// bound lifetimes are resolved by name and associated with a binder ID (`binder_id`), so the
/// ordering is not important there.
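    ///
    /// In the `foo` example above (an illustrative reading), `'b` would get
    /// early-bound index 0 for a free fn, while `'a` and `'c` stay late-bound
    /// and are later resolved by name.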
fn visit_early_late<F>(
&mut self,
parent_id: Option<hir::HirId>,
decl: &'tcx hir::FnDecl<'tcx>,
generics: &'tcx hir::Generics<'tcx>,
walk: F,
) where
F: for<'b, 'c> FnOnce(&'b mut LifetimeContext<'c, 'tcx>),
{
insert_late_bound_lifetimes(self.map, decl, generics);
// Find the start of nested early scopes, e.g., in methods.
let mut index = 0;
if let Some(parent_id) = parent_id {
let parent = self.tcx.hir().expect_item(parent_id);
if sub_items_have_self_param(&parent.kind) {
index += 1; // Self comes before lifetimes
}
match parent.kind {
hir::ItemKind::Trait(_, _, ref generics, ..)
| hir::ItemKind::Impl { ref generics, .. } => {
index += generics.params.len() as u32;
}
_ => {}
}
}
let mut non_lifetime_count = 0;
let lifetimes = generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
                    if self.map.late_bound.contains(&param.hir_id) {
Some(Region::late(&self.tcx.hir(), param))
} else {
Some(Region::early(&self.tcx.hir(), &mut index, param))
}
}
GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
non_lifetime_count += 1;
None
}
})
.collect();
let next_early_index = index + non_lifetime_count;
let scope = Scope::Binder {
lifetimes,
next_early_index,
s: self.scope,
opaque_type_parent: true,
track_lifetime_uses: false,
};
self.with(scope, move |old_scope, this| {
this.check_lifetime_params(old_scope, &generics.params);
this.hack(walk); // FIXME(#37666) workaround in place of `walk(this)`
});
}
fn next_early_index_helper(&self, only_opaque_type_parent: bool) -> u32 {
let mut scope = self.scope;
loop {
match *scope {
Scope::Root => return 0,
Scope::Binder { next_early_index, opaque_type_parent, .. }
if (!only_opaque_type_parent || opaque_type_parent) =>
{
return next_early_index;
}
Scope::Binder { s, .. }
| Scope::Body { s, .. }
| Scope::Elision { s, .. }
| Scope::ObjectLifetimeDefault { s, .. } => scope = s,
}
}
}
/// Returns the next index one would use for an early-bound-region
/// if extending the current scope.
fn next_early_index(&self) -> u32 {
self.next_early_index_helper(true)
}
/// Returns the next index one would use for an `impl Trait` that
/// is being converted into an opaque type alias `impl Trait`. This will be the
/// next early index from the enclosing item, for the most
/// part. See the `opaque_type_parent` field for more info.
fn next_early_index_for_opaque_type(&self) -> u32 {
self.next_early_index_helper(false)
}
fn resolve_lifetime_ref(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
debug!("resolve_lifetime_ref(lifetime_ref={:?})", lifetime_ref);
// If we've already reported an error, just ignore `lifetime_ref`.
if let LifetimeName::Error = lifetime_ref.name {
return;
}
// Walk up the scope chain, tracking the number of fn scopes
// that we pass through, until we find a lifetime with the
// given name or we run out of scopes.
let mut late_depth = 0;
let mut scope = self.scope;
let mut outermost_body = None;
let result = loop {
match *scope {
Scope::Body { id, s } => {
outermost_body = Some(id);
scope = s;
}
Scope::Root => {
break None;
}
Scope::Binder { ref lifetimes, s, .. } => {
match lifetime_ref.name {
LifetimeName::Param(param_name) => {
                            if let Some(&def) = lifetimes.get(&param_name.modern()) {
break Some(def.shifted(late_depth));
}
}
_ => bug!("expected LifetimeName::Param"),
}
late_depth += 1;
scope = s;
}
Scope::Elision { s, .. } | Scope::ObjectLifetimeDefault { s, .. } => {
scope = s;
}
}
};
if let Some(mut def) = result {
if let Region::EarlyBound(..) = def {
// Do not free early-bound regions, only late-bound ones.
} else if let Some(body_id) = outermost_body {
let fn_id = self.tcx.hir().body_owner(body_id);
match self.tcx.hir().get(fn_id) {
Node::Item(&hir::Item { kind: hir::ItemKind::Fn(..), .. })
| Node::TraitItem(&hir::TraitItem {
kind: hir::TraitItemKind::Method(..),
..
})
| Node::ImplItem(&hir::ImplItem {
kind: hir::ImplItemKind::Method(..), ..
}) => {
let scope = self.tcx.hir().local_def_id(fn_id);
def = Region::Free(scope, def.id().unwrap());
}
_ => {}
}
}
// Check for fn-syntax conflicts with in-band lifetime definitions
if self.is_in_fn_syntax {
match def {
Region::EarlyBound(_, _, LifetimeDefOrigin::InBand)
| Region::LateBound(_, _, LifetimeDefOrigin::InBand) => {
struct_span_err!(
self.tcx.sess,
lifetime_ref.span,
E0687,
"lifetimes used in `fn` or `Fn` syntax must be \
explicitly declared using `<...>` binders"
)
.span_label(lifetime_ref.span, "in-band lifetime definition")
.emit();
}
Region::Static
| Region::EarlyBound(_, _, LifetimeDefOrigin::ExplicitOrElided)
| Region::LateBound(_, _, LifetimeDefOrigin::ExplicitOrElided)
| Region::EarlyBound(_, _, LifetimeDefOrigin::Error)
| Region::LateBound(_, _, LifetimeDefOrigin::Error)
| Region::LateBoundAnon(..)
| Region::Free(..) => {}
}
}
self.insert_lifetime(lifetime_ref, def);
} else {
self.emit_undeclared_lifetime_error(lifetime_ref);
}
}
fn visit_segment_args(
&mut self,
res: Res,
depth: usize,
generic_args: &'tcx hir::GenericArgs<'tcx>,
) {
debug!(
"visit_segment_args(res={:?}, depth={:?}, generic_args={:?})",
res, depth, generic_args,
);
if generic_args.parenthesized {
let was_in_fn_syntax = self.is_in_fn_syntax;
self.is_in_fn_syntax = true;
self.visit_fn_like_elision(generic_args.inputs(), Some(generic_args.bindings[0].ty()));
self.is_in_fn_syntax = was_in_fn_syntax;
return;
}
let mut elide_lifetimes = true;
let lifetimes = generic_args
.args
.iter()
.filter_map(|arg| match arg {
hir::GenericArg::Lifetime(lt) => {
if !lt.is_elided() {
elide_lifetimes = false;
}
Some(lt)
}
_ => None,
})
.collect();
if elide_lifetimes {
self.resolve_elided_lifetimes(lifetimes);
} else {
lifetimes.iter().for_each(|lt| self.visit_lifetime(lt));
}
// Figure out if this is a type/trait segment,
// which requires object lifetime defaults.
let parent_def_id = |this: &mut Self, def_id: DefId| {
let def_key = this.tcx.def_key(def_id);
DefId { krate: def_id.krate, index: def_key.parent.expect("missing parent") }
};
let type_def_id = match res {
Res::Def(DefKind::AssocTy, def_id) if depth == 1 => Some(parent_def_id(self, def_id)),
Res::Def(DefKind::Variant, def_id) if depth == 0 => Some(parent_def_id(self, def_id)),
Res::Def(DefKind::Struct, def_id)
| Res::Def(DefKind::Union, def_id)
| Res::Def(DefKind::Enum, def_id)
| Res::Def(DefKind::TyAlias, def_id)
| Res::Def(DefKind::Trait, def_id)
if depth == 0 =>
{
Some(def_id)
}
_ => None,
};
debug!("visit_segment_args: type_def_id={:?}", type_def_id);
// Compute a vector of defaults, one for each type parameter,
// per the rules given in RFCs 599 and 1156. Example:
//
// ```rust
// struct Foo<'a, T: 'a, U> { }
// ```
//
// If you have `Foo<'x, dyn Bar, dyn Baz>`, we want to default
// `dyn Bar` to `dyn Bar + 'x` (because of the `T: 'a` bound)
// and `dyn Baz` to `dyn Baz + 'static` (because there is no
// such bound).
//
// Therefore, we would compute `object_lifetime_defaults` to a
// vector like `['x, 'static]`. Note that the vector only
// includes type parameters.
let object_lifetime_defaults = type_def_id.map_or(vec![], |def_id| {
let in_body = {
let mut scope = self.scope;
loop {
match *scope {
Scope::Root => break false,
Scope::Body { .. } => break true,
Scope::Binder { s, .. }
| Scope::Elision { s, .. }
| Scope::ObjectLifetimeDefault { s, .. } => {
scope = s;
}
}
}
};
let map = &self.map;
let unsubst = if let Some(id) = self.tcx.hir().as_local_hir_id(def_id) {
&map.object_lifetime_defaults[&id]
} else {
let tcx = self.tcx;
self.xcrate_object_lifetime_defaults.entry(def_id).or_insert_with(|| {
tcx.generics_of(def_id)
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamDefKind::Type { object_lifetime_default, .. } => {
Some(object_lifetime_default)
}
GenericParamDefKind::Lifetime | GenericParamDefKind::Const => None,
})
.collect()
})
};
debug!("visit_segment_args: unsubst={:?}", unsubst);
unsubst
.iter()
.map(|set| match *set {
Set1::Empty => {
if in_body {
None
} else {
Some(Region::Static)
}
}
Set1::One(r) => {
let lifetimes = generic_args.args.iter().filter_map(|arg| match arg {
GenericArg::Lifetime(lt) => Some(lt),
_ => None,
});
r.subst(lifetimes, map)
}
Set1::Many => None,
})
.collect()
});
debug!("visit_segment_args: object_lifetime_defaults={:?}", object_lifetime_defaults);
let mut i = 0;
for arg in generic_args.args {
match arg {
GenericArg::Lifetime(_) => {}
GenericArg::Type(ty) => {
                    if let Some(&lt) = object_lifetime_defaults.get(i) {
let scope = Scope::ObjectLifetimeDefault { lifetime: lt, s: self.scope };
self.with(scope, |_, this| this.visit_ty(ty));
} else {
self.visit_ty(ty);
}
i += 1;
}
GenericArg::Const(ct) => {
self.visit_anon_const(&ct.value);
}
}
}
// Hack: when resolving the type `XX` in binding like `dyn
// Foo<'b, Item = XX>`, the current object-lifetime default
// would be to examine the trait `Foo` to check whether it has
// a lifetime bound declared on `Item`. e.g., if `Foo` is
// declared like so, then the default object lifetime bound in
// `XX` should be `'b`:
//
// ```rust
// trait Foo<'a> {
// type Item: 'a;
// }
// ```
//
// but if we just have `type Item;`, then it would be
// `'static`. However, we don't get all of this logic correct.
//
// Instead, we do something hacky: if there are no lifetime parameters
// to the trait, then we simply use a default object lifetime
// bound of `'static`, because there is no other possibility. On the other hand,
// if there ARE lifetime parameters, then we require the user to give an
// explicit bound for now.
//
// This is intended to leave room for us to implement the
// correct behavior in the future.
let has_lifetime_parameter = generic_args.args.iter().any(|arg| match arg {
GenericArg::Lifetime(_) => true,
_ => false,
});
// Resolve lifetimes found in the type `XX` from `Item = XX` bindings.
for b in generic_args.bindings {
let scope = Scope::ObjectLifetimeDefault {
lifetime: if has_lifetime_parameter { None } else { Some(Region::Static) },
s: self.scope,
};
self.with(scope, |_, this| this.visit_assoc_type_binding(b));
}
}
fn visit_fn_like_elision(
&mut self,
inputs: &'tcx [hir::Ty<'tcx>],
output: Option<&'tcx hir::Ty<'tcx>>,
) {
debug!("visit_fn_like_elision: enter");
let mut arg_elide = Elide::FreshLateAnon(Cell::new(0));
let arg_scope = Scope::Elision { elide: arg_elide.clone(), s: self.scope };
self.with(arg_scope, |_, this| {
for input in inputs {
this.visit_ty(input);
}
match *this.scope {
Scope::Elision { ref elide, .. } => {
arg_elide = elide.clone();
}
_ => bug!(),
}
});
let output = match output {
Some(ty) => ty,
None => return,
};
debug!("visit_fn_like_elision: determine output");
// Figure out if there's a body we can get argument names from,
// and whether there's a `self` argument (treated specially).
let mut assoc_item_kind = None;
let mut impl_self = None;
let parent = self.tcx.hir().get_parent_node(output.hir_id);
let body = match self.tcx.hir().get(parent) {
// `fn` definitions and methods.
Node::Item(&hir::Item { kind: hir::ItemKind::Fn(.., body), .. }) => Some(body),
Node::TraitItem(&hir::TraitItem {
kind: hir::TraitItemKind::Method(_, ref m), ..
}) => {
if let hir::ItemKind::Trait(.., ref trait_items) =
self.tcx.hir().expect_item(self.tcx.hir().get_parent_item(parent)).kind
{
assoc_item_kind =
trait_items.iter().find(|ti| ti.id.hir_id == parent).map(|ti| ti.kind);
}
match *m {
hir::TraitMethod::Required(_) => None,
hir::TraitMethod::Provided(body) => Some(body),
}
}
Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Method(_, body), .. }) => {
if let hir::ItemKind::Impl { ref self_ty, ref items, .. } =
self.tcx.hir().expect_item(self.tcx.hir().get_parent_item(parent)).kind
{
impl_self = Some(self_ty);
assoc_item_kind =
items.iter().find(|ii| ii.id.hir_id == parent).map(|ii| ii.kind);
}
Some(body)
}
// Foreign functions, `fn(...) -> R` and `Trait(...) -> R` (both types and bounds).
Node::ForeignItem(_) | Node::Ty(_) | Node::TraitRef(_) => None,
// Everything else (only closures?) doesn't
// actually enjoy elision in return types.
_ => {
self.visit_ty(output);
return;
}
};
let has_self = match assoc_item_kind {
Some(hir::AssocItemKind::Method { has_self }) => has_self,
_ => false,
};
// In accordance with the rules for lifetime elision, we can determine
// what region to use for elision in the output type in two ways.
// First (determined here), if `self` is by-reference, then the
// implied output region is the region of the self parameter.
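        //
        // For example (an illustrative case): in `fn name(&'a self) -> &str`,
        // the elided output lifetime resolves to `'a`, taken from `self`.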
if has_self {
struct SelfVisitor<'a> {
map: &'a NamedRegionMap,
impl_self: Option<&'a hir::TyKind<'a>>,
lifetime: Set1<Region>,
}
impl SelfVisitor<'_> {
// Look for `self: &'a Self` - also desugared from `&'a self`,
// and if that matches, use it for elision and return early.
fn is_self_ty(&self, res: Res) -> bool {
if let Res::SelfTy(..) = res {
return true;
}
// Can't always rely on literal (or implied) `Self` due
// to the way elision rules were originally specified.
if let Some(&hir::TyKind::Path(hir::QPath::Resolved(None, ref path))) =
self.impl_self
{
match path.res {
// Whitelist the types that unambiguously always
// result in the same type constructor being used
// (it can't differ between `Self` and `self`).
Res::Def(DefKind::Struct, _)
| Res::Def(DefKind::Union, _)
| Res::Def(DefKind::Enum, _)
| Res::PrimTy(_) => return res == path.res,
_ => {}
}
}
false
}
}
impl<'a> Visitor<'a> for SelfVisitor<'a> {
type Map = Map<'a>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<'_, Self::Map> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &'a hir::Ty<'a>) {
if let hir::TyKind::Rptr(lifetime_ref, ref mt) = ty.kind {
if let hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) = mt.ty.kind
{
if self.is_self_ty(path.res) {
if let Some(lifetime) = self.map.defs.get(&lifetime_ref.hir_id) {
self.lifetime.insert(*lifetime);
}
}
}
}
intravisit::walk_ty(self, ty)
}
}
let mut visitor = SelfVisitor {
map: self.map,
impl_self: impl_self.map(|ty| &ty.kind),
lifetime: Set1::Empty,
};
visitor.visit_ty(&inputs[0]);
if let Set1::One(lifetime) = visitor.lifetime {
let scope = Scope::Elision { elide: Elide::Exact(lifetime), s: self.scope };
self.with(scope, |_, this| this.visit_ty(output));
return;
}
}
// Second, if there was exactly one lifetime (either a substitution or a
// reference) in the arguments, then any anonymous regions in the output
// have that lifetime.
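        //
        // For example (illustration only): in `fn first(v: &'a [u32], n: usize) -> &u32`
        // there is exactly one input lifetime, so the output borrows as `&'a u32`.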
let mut possible_implied_output_region = None;
let mut lifetime_count = 0;
let arg_lifetimes = inputs
.iter()
.enumerate()
.skip(has_self as usize)
.map(|(i, input)| {
let mut gather = GatherLifetimes {
map: self.map,
outer_index: ty::INNERMOST,
have_bound_regions: false,
lifetimes: Default::default(),
};
gather.visit_ty(input);
lifetime_count += gather.lifetimes.len();
if lifetime_count == 1 && gather.lifetimes.len() == 1 {
// there's a chance that the unique lifetime of this
// iteration will be the appropriate lifetime for output
                    // parameters, so let's store it.
possible_implied_output_region = gather.lifetimes.iter().cloned().next();
}
ElisionFailureInfo {
parent: body,
index: i,
lifetime_count: gather.lifetimes.len(),
have_bound_regions: gather.have_bound_regions,
span: input.span,
}
})
.collect();
let elide = if lifetime_count == 1 {
Elide::Exact(possible_implied_output_region.unwrap())
} else {
Elide::Error(arg_lifetimes)
};
debug!("visit_fn_like_elision: elide={:?}", elide);
let scope = Scope::Elision { elide, s: self.scope };
self.with(scope, |_, this| this.visit_ty(output));
debug!("visit_fn_like_elision: exit");
struct GatherLifetimes<'a> {
map: &'a NamedRegionMap,
outer_index: ty::DebruijnIndex,
have_bound_regions: bool,
lifetimes: FxHashSet<Region>,
}
impl<'v, 'a> Visitor<'v> for GatherLifetimes<'a> {
type Map = Map<'v>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<'_, Self::Map> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &hir::Ty<'_>) {
if let hir::TyKind::BareFn(_) = ty.kind {
self.outer_index.shift_in(1);
}
match ty.kind {
hir::TyKind::TraitObject(bounds, ref lifetime) => {
for bound in bounds {
self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
}
// Stay on the safe side and don't include the object
// lifetime default (which may not end up being used).
if !lifetime.is_elided() {
self.visit_lifetime(lifetime);
}
}
_ => {
intravisit::walk_ty(self, ty);
}
}
if let hir::TyKind::BareFn(_) = ty.kind {
self.outer_index.shift_out(1);
}
}
fn visit_generic_param(&mut self, param: &hir::GenericParam<'_>) {
if let hir::GenericParamKind::Lifetime { .. } = param.kind {
// FIXME(eddyb) Do we want this? It only makes a difference
// if this `for<'a>` lifetime parameter is never used.
self.have_bound_regions = true;
}
intravisit::walk_generic_param(self, param);
}
fn visit_poly_trait_ref(
&mut self,
trait_ref: &hir::PolyTraitRef<'_>,
modifier: hir::TraitBoundModifier,
) {
self.outer_index.shift_in(1);
intravisit::walk_poly_trait_ref(self, trait_ref, modifier);
self.outer_index.shift_out(1);
}
fn visit_lifetime(&mut self, lifetime_ref: &hir::Lifetime) {
if let Some(&lifetime) = self.map.defs.get(&lifetime_ref.hir_id) {
match lifetime {
Region::LateBound(debruijn, _, _) | Region::LateBoundAnon(debruijn, _)
if debruijn < self.outer_index =>
{
self.have_bound_regions = true;
}
_ => {
self.lifetimes.insert(lifetime.shifted_out_to_binder(self.outer_index));
}
}
}
}
}
}
fn resolve_elided_lifetimes(&mut self, lifetime_refs: Vec<&'tcx hir::Lifetime>) {
debug!("resolve_elided_lifetimes(lifetime_refs={:?})", lifetime_refs);
if lifetime_refs.is_empty() {
return;
}
let span = lifetime_refs[0].span;
let mut late_depth = 0;
let mut scope = self.scope;
let mut lifetime_names = FxHashSet::default();
let error = loop {
match *scope {
// Do not assign any resolution, it will be inferred.
Scope::Body { .. } => return,
Scope::Root => break None,
Scope::Binder { s, ref lifetimes, .. } => {
// collect named lifetimes for suggestions
for name in lifetimes.keys() {
if let hir::ParamName::Plain(name) = name {
lifetime_names.insert(*name);
}
}
late_depth += 1;
scope = s;
}
Scope::Elision { ref elide, ref s, .. } => {
let lifetime = match *elide {
Elide::FreshLateAnon(ref counter) => {
for lifetime_ref in lifetime_refs {
let lifetime = Region::late_anon(counter).shifted(late_depth);
self.insert_lifetime(lifetime_ref, lifetime);
}
return;
}
Elide::Exact(l) => l.shifted(late_depth),
Elide::Error(ref e) => {
if let Scope::Binder { ref lifetimes, .. } = s {
// collect named lifetimes for suggestions
for name in lifetimes.keys() {
if let hir::ParamName::Plain(name) = name {
lifetime_names.insert(*name);
}
}
}
break Some(e);
}
};
for lifetime_ref in lifetime_refs {
self.insert_lifetime(lifetime_ref, lifetime);
}
return;
}
Scope::ObjectLifetimeDefault { s, .. } => {
scope = s;
}
}
};
let mut err = self.report_missing_lifetime_specifiers(span, lifetime_refs.len());
let mut add_label = true;
if let Some(params) = error {
if lifetime_refs.len() == 1 {
add_label = add_label && self.report_elision_failure(&mut err, params, span);
}
}
if add_label {
self.add_missing_lifetime_specifiers_label(
&mut err,
span,
lifetime_refs.len(),
&lifetime_names,
error.map(|p| &p[..]).unwrap_or(&[]),
);
}
err.emit();
}
fn suggest_lifetime(&self, db: &mut DiagnosticBuilder<'_>, span: Span, msg: &str) -> bool {
match self.tcx.sess.source_map().span_to_snippet(span) {
Ok(ref snippet) => {
let (sugg, applicability) = if snippet == "&" {
("&'static ".to_owned(), Applicability::MachineApplicable)
} else if snippet == "'_" {
("'static".to_owned(), Applicability::MachineApplicable)
} else {
(format!("{} + 'static", snippet), Applicability::MaybeIncorrect)
};
db.span_suggestion(span, msg, sugg, applicability);
false
}
Err(_) => {
db.help(msg);
true
}
}
}
fn report_elision_failure(
&mut self,
db: &mut DiagnosticBuilder<'_>,
params: &[ElisionFailureInfo],
span: Span,
) -> bool {
let mut m = String::new();
let len = params.len();
let elided_params: Vec<_> =
params.iter().cloned().filter(|info| info.lifetime_count > 0).collect();
let elided_len = elided_params.len();
for (i, info) in elided_params.into_iter().enumerate() {
let ElisionFailureInfo { parent, index, lifetime_count: n, have_bound_regions, span } =
info;
db.span_label(span, "");
let help_name = if let Some(ident) =
parent.and_then(|body| self.tcx.hir().body(body).params[index].pat.simple_ident())
{
format!("`{}`", ident)
} else {
format!("argument {}", index + 1)
};
m.push_str(
&(if n == 1 {
help_name
} else {
format!(
"one of {}'s {} {}lifetimes",
help_name,
n,
if have_bound_regions { "free " } else { "" }
)
})[..],
);
if elided_len == 2 && i == 0 {
m.push_str(" or ");
} else if i + 2 == elided_len {
m.push_str(", or ");
} else if i != elided_len - 1 {
m.push_str(", ");
}
}
if len == 0 {
db.help(
"this function's return type contains a borrowed value, \
but there is no value for it to be borrowed from",
);
self.suggest_lifetime(db, span, "consider giving it a 'static lifetime")
} else if elided_len == 0 {
db.help(
"this function's return type contains a borrowed value with \
an elided lifetime, but the lifetime cannot be derived from \
the arguments",
);
let msg = "consider giving it an explicit bounded or 'static lifetime";
self.suggest_lifetime(db, span, msg)
} else if elided_len == 1 {
db.help(&format!(
"this function's return type contains a borrowed value, \
but the signature does not say which {} it is borrowed from",
m
));
true
} else {
db.help(&format!(
"this function's return type contains a borrowed value, \
but the signature does not say whether it is borrowed from {}",
m
));
true
}
}
fn resolve_object_lifetime_default(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
debug!("resolve_object_lifetime_default(lifetime_ref={:?})", lifetime_ref);
let mut late_depth = 0;
let mut scope = self.scope;
let lifetime = loop {
match *scope {
Scope::Binder { s, .. } => {
late_depth += 1;
scope = s;
}
Scope::Root | Scope::Elision { .. } => break Region::Static,
Scope::Body { .. } | Scope::ObjectLifetimeDefault { lifetime: None, .. } => return,
Scope::ObjectLifetimeDefault { lifetime: Some(l), .. } => break l,
}
};
self.insert_lifetime(lifetime_ref, lifetime.shifted(late_depth));
}
fn check_lifetime_params(
&mut self,
old_scope: ScopeRef<'_>,
params: &'tcx [hir::GenericParam<'tcx>],
) {
let lifetimes: Vec<_> = params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => Some((param, param.name.modern())),
_ => None,
})
.collect();
for (i, (lifetime_i, lifetime_i_name)) in lifetimes.iter().enumerate() {
if let hir::ParamName::Plain(_) = lifetime_i_name {
let name = lifetime_i_name.ident().name;
if name == kw::UnderscoreLifetime || name == kw::StaticLifetime {
let mut err = struct_span_err!(
self.tcx.sess,
lifetime_i.span,
E0262,
"invalid lifetime parameter name: `{}`",
lifetime_i.name.ident(),
);
err.span_label(
lifetime_i.span,
format!("{} is a reserved lifetime name", name),
);
err.emit();
}
}
// It is a hard error to shadow a lifetime within the same scope.
for (lifetime_j, lifetime_j_name) in lifetimes.iter().skip(i + 1) {
if lifetime_i_name == lifetime_j_name {
struct_span_err!(
self.tcx.sess,
lifetime_j.span,
E0263,
"lifetime name `{}` declared twice in the same scope",
lifetime_j.name.ident()
)
.span_label(lifetime_j.span, "declared twice")
.span_label(lifetime_i.span, "previous declaration here")
.emit();
}
}
// It is a soft error to shadow a lifetime within a parent scope.
self.check_lifetime_param_for_shadowing(old_scope, &lifetime_i);
for bound in lifetime_i.bounds {
match bound {
hir::GenericBound::Outlives(ref lt) => match lt.name {
hir::LifetimeName::Underscore => self.tcx.sess.delay_span_bug(
lt.span,
"use of `'_` in illegal place, but not caught by lowering",
),
hir::LifetimeName::Static => {
self.insert_lifetime(lt, Region::Static);
self.tcx
.sess
.struct_span_warn(
lifetime_i.span.to(lt.span),
&format!(
"unnecessary lifetime parameter `{}`",
lifetime_i.name.ident(),
),
)
.help(&format!(
"you can use the `'static` lifetime directly, in place of `{}`",
lifetime_i.name.ident(),
))
.emit();
}
hir::LifetimeName::Param(_) | hir::LifetimeName::Implicit => {
self.resolve_lifetime_ref(lt);
}
hir::LifetimeName::ImplicitObjectLifetimeDefault => {
self.tcx.sess.delay_span_bug(
lt.span,
"lowering generated `ImplicitObjectLifetimeDefault` \
outside of an object type",
)
}
hir::LifetimeName::Error => {
// No need to do anything, error already reported.
}
},
_ => bug!(),
}
}
}
}
fn check_lifetime_param_for_shadowing(
&self,
mut old_scope: ScopeRef<'_>,
param: &'tcx hir::GenericParam<'tcx>,
) {
for label in &self.labels_in_fn {
// FIXME (#24278): non-hygienic comparison
if param.name.ident().name == label.name {
signal_shadowing_problem(
self.tcx,
label.name,
original_label(label.span),
                    shadower_lifetime(&param),
);
return;
}
}
loop {
match *old_scope {
Scope::Body { s, .. }
| Scope::Elision { s, .. }
| Scope::ObjectLifetimeDefault { s, .. } => {
old_scope = s;
}
Scope::Root => {
return;
}
Scope::Binder { ref lifetimes, s, .. } => {
                    if let Some(&def) = lifetimes.get(&param.name.modern()) {
let hir_id = self.tcx.hir().as_local_hir_id(def.id().unwrap()).unwrap();
signal_shadowing_problem(
self.tcx,
param.name.ident().name,
original_lifetime(self.tcx.hir().span(hir_id)),
                            shadower_lifetime(&param),
);
return;
}
old_scope = s;
}
}
}
}
/// Returns `true` if, in the current scope, replacing `'_` would be
/// equivalent to a single-use lifetime.
fn track_lifetime_uses(&self) -> bool {
let mut scope = self.scope;
loop {
match *scope {
Scope::Root => break false,
// Inside of items, it depends on the kind of item.
Scope::Binder { track_lifetime_uses, .. } => break track_lifetime_uses,
// Inside a body, `'_` will use an inference variable,
                // which should be fine.
Scope::Body { .. } => break true,
// A lifetime only used in a fn argument could as well
// be replaced with `'_`, as that would generate a
// fresh name, too.
Scope::Elision { elide: Elide::FreshLateAnon(_), .. } => break true,
// In the return type or other such place, `'_` is not
// going to make a fresh name, so we cannot
// necessarily replace a single-use lifetime with
// `'_`.
Scope::Elision { elide: Elide::Exact(_), .. } => break false,
Scope::Elision { elide: Elide::Error(_), .. } => break false,
Scope::ObjectLifetimeDefault { s, .. } => scope = s,
}
}
}
fn insert_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime, def: Region) {
if lifetime_ref.hir_id == hir::DUMMY_HIR_ID {
span_bug!(
lifetime_ref.span,
"lifetime reference not renumbered, \
probably a bug in syntax::fold"
);
}
debug!(
"insert_lifetime: {} resolved to {:?} span={:?}",
self.tcx.hir().node_to_string(lifetime_ref.hir_id),
def,
self.tcx.sess.source_map().span_to_string(lifetime_ref.span)
);
self.map.defs.insert(lifetime_ref.hir_id, def);
match def {
Region::LateBoundAnon(..) | Region::Static => {
// These are anonymous lifetimes or lifetimes that are not declared.
}
Region::Free(_, def_id)
| Region::LateBound(_, def_id, _)
| Region::EarlyBound(_, def_id, _) => {
// A lifetime declared by the user.
let track_lifetime_uses = self.track_lifetime_uses();
debug!("insert_lifetime: track_lifetime_uses={}", track_lifetime_uses);
if track_lifetime_uses && !self.lifetime_uses.contains_key(&def_id) {
debug!("insert_lifetime: first use of {:?}", def_id);
self.lifetime_uses.insert(def_id, LifetimeUseSet::One(lifetime_ref));
} else {
debug!("insert_lifetime: many uses of {:?}", def_id);
self.lifetime_uses.insert(def_id, LifetimeUseSet::Many);
}
}
}
}
/// Sometimes we resolve a lifetime, but later find that it is an
/// error (esp. around impl trait). In that case, we remove the
/// entry into `map.defs` so as not to confuse later code.
fn uninsert_lifetime_on_error(&mut self, lifetime_ref: &'tcx hir::Lifetime, bad_def: Region) {
let old_value = self.map.defs.remove(&lifetime_ref.hir_id);
assert_eq!(old_value, Some(bad_def));
}
}
/// Detects late-bound lifetimes and inserts them into
/// `map.late_bound`.
///
/// A region declared on a fn is **late-bound** if:
/// - it is constrained by an argument type;
/// - it does not appear in a where-clause.
///
/// "Constrained" basically means that it appears in any type but
/// not amongst the inputs to a projection. In other words, `<&'a
/// T as Trait<'b>>::Foo` does not constrain `'a` or `'b`.
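///
/// For example (an illustrative sketch): in `fn f<'a, 'b, T: 'b>(x: &'a T)`,
/// `'a` is late-bound (constrained by an argument type and absent from any
/// where-clause), while `'b` is early-bound because it appears in the
/// `T: 'b` bound.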
fn insert_late_bound_lifetimes(
map: &mut NamedRegionMap,
decl: &hir::FnDecl<'_>,
generics: &hir::Generics<'_>,
) {
debug!("insert_late_bound_lifetimes(decl={:?}, generics={:?})", decl, generics);
let mut constrained_by_input = ConstrainedCollector::default();
for arg_ty in decl.inputs {
constrained_by_input.visit_ty(arg_ty);
}
let mut appears_in_output = AllCollector::default();
intravisit::walk_fn_ret_ty(&mut appears_in_output, &decl.output);
debug!("insert_late_bound_lifetimes: constrained_by_input={:?}", constrained_by_input.regions);
// Walk the lifetimes that appear in where clauses.
//
// Subtle point: because we disallow nested bindings, we can just
// ignore binders here and scrape up all names we see.
let mut appears_in_where_clause = AllCollector::default();
appears_in_where_clause.visit_generics(generics);
for param in generics.params {
if let hir::GenericParamKind::Lifetime { .. } = param.kind {
if !param.bounds.is_empty() {
// `'a: 'b` means both `'a` and `'b` are referenced
appears_in_where_clause
.regions
.insert(hir::LifetimeName::Param(param.name.modern()));
}
}
}
debug!(
"insert_late_bound_lifetimes: appears_in_where_clause={:?}",
appears_in_where_clause.regions
);
// Late bound regions are those that:
// - appear in the inputs
// - do not appear in the where-clauses
// - are not implicitly captured by `impl Trait`
for param in generics.params {
match param.kind {
hir::GenericParamKind::Lifetime { .. } => { /* fall through */ }
// Neither types nor consts are late-bound.
hir::GenericParamKind::Type { .. } | hir::GenericParamKind::Const { .. } => continue,
}
let lt_name = hir::LifetimeName::Param(param.name.modern());
// appears in the where clauses? early-bound.
        if appears_in_where_clause.regions.contains(&lt_name) {
continue;
}
// does not appear in the inputs, but appears in the return type? early-bound.
        if !constrained_by_input.regions.contains(&lt_name)
            && appears_in_output.regions.contains(&lt_name)
{
continue;
}
debug!(
"insert_late_bound_lifetimes: lifetime {:?} with id {:?} is late-bound",
param.name.ident(),
param.hir_id
);
let inserted = map.late_bound.insert(param.hir_id);
assert!(inserted, "visited lifetime {:?} twice", param.hir_id);
}
return;
#[derive(Default)]
struct ConstrainedCollector {
regions: FxHashSet<hir::LifetimeName>,
}
impl<'v> Visitor<'v> for ConstrainedCollector {
type Map = Map<'v>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<'_, Self::Map> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
match ty.kind {
hir::TyKind::Path(hir::QPath::Resolved(Some(_), _))
| hir::TyKind::Path(hir::QPath::TypeRelative(..)) => {
// ignore lifetimes appearing in associated type
// projections, as they are not *constrained*
// (defined above)
}
hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => {
// consider only the lifetimes on the final
// segment; I am not sure it's even currently
// valid to have them elsewhere, but even if it
// is, those would be potentially inputs to
// projections
if let Some(last_segment) = path.segments.last() {
self.visit_path_segment(path.span, last_segment);
}
}
_ => {
intravisit::walk_ty(self, ty);
}
}
}
fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
self.regions.insert(lifetime_ref.name.modern());
}
}
#[derive(Default)]
struct AllCollector {
regions: FxHashSet<hir::LifetimeName>,
}
impl<'v> Visitor<'v> for AllCollector {
type Map = Map<'v>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<'_, Self::Map> {
NestedVisitorMap::None
}
fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
self.regions.insert(lifetime_ref.name.modern());
}
}
}
| 40.349245 | 100 | 0.469935 |
f48a44fd9a72872bafc81c933d3baf775c3f5bd2 | 7,099 | //! Gets video information by video ID (one or more), user ID (one only), or game ID (one only).
//! [`get-videos`](https://dev.twitch.tv/docs/api/reference#get-videos)
//!
//! # Accessing the endpoint
//!
//! ## Request: [GetVideosRequest]
//!
//! To use this endpoint, construct a [`GetVideosRequest`] with the [`GetVideosRequest::builder()`] method.
//!
//! ```rust, no_run
//! use twitch_api2::helix::videos::get_videos;
//! let request = get_videos::GetVideosRequest::builder()
//! .user_id("1234".to_string())
//! .build();
//! ```
//!
//! ## Response: [Video]
//!
//! Send the request to receive the response with [`HelixClient::req_get()`](helix::HelixClient::req_get).
//!
//! ```rust, no_run
//! use twitch_api2::helix::{self, videos::get_videos};
//! # use twitch_api2::client;
//! # #[tokio::main]
//! # async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
//! # let client: helix::HelixClient<'static, client::DummyHttpClient> = helix::HelixClient::default();
//! # let token = twitch_oauth2::AccessToken::new("validtoken".to_string());
//! # let token = twitch_oauth2::UserToken::from_existing(twitch_oauth2::dummy_http_client, token, None, None).await?;
//! let request = get_videos::GetVideosRequest::builder()
//! .user_id("1234".to_string())
//! .build();
//! let response: Vec<get_videos::Video> = client.req_get(request, &token).await?.data;
//! # Ok(())
//! # }
//! ```
//!
//! You can also get the [`http::Request`] with [`request.create_request(&token, &client_id)`](helix::RequestGet::create_request)
//! and parse the [`http::Response`] with [`GetVideosRequest::parse_response(None, &request.get_uri(), response)`](GetVideosRequest::parse_response)
use super::*;
use helix::RequestGet;
// FIXME: One of id, user_id or game_id needs to be specified. typed_builder should have enums. id cannot be used with other params
/// Query Parameters for [Get Videos](super::get_videos)
///
/// [`get-videos`](https://dev.twitch.tv/docs/api/reference#get-videos)
#[derive(PartialEq, typed_builder::TypedBuilder, Deserialize, Serialize, Clone, Debug)]
#[non_exhaustive]
pub struct GetVideosRequest {
/// ID of the video being queried. Limit: 100. If this is specified, you cannot use any of the optional query parameters below.
#[builder(default)]
pub id: Vec<types::VideoId>,
/// ID of the user who owns the video.
#[builder(default, setter(into))]
pub user_id: Option<types::UserId>,
/// ID of the game the video is of.
#[builder(default, setter(into))]
pub game_id: Option<types::CategoryId>,
/// Cursor for forward pagination: tells the server where to start fetching the next set of results, in a multi-page response. The cursor value specified here is from the pagination response field of a prior query.
#[builder(default)]
pub after: Option<helix::Cursor>,
/// Cursor for backward pagination: tells the server where to start fetching the next set of results, in a multi-page response. The cursor value specified here is from the pagination response field of a prior query.
#[builder(default)]
pub before: Option<helix::Cursor>,
/// Number of values to be returned when getting videos by user or game ID. Limit: 100. Default: 20.
#[builder(default)]
#[builder(setter(strip_option))]
pub first: Option<usize>,
/// Language of the video being queried. Limit: 1.
#[builder(default, setter(into))]
pub language: Option<String>,
/// Period during which the video was created. Valid values: "all", "day", "week", "month". Default: "all".
#[builder(default, setter(into))]
pub period: Option<VideoPeriod>,
/// Sort order of the videos. Valid values: "time", "trending", "views". Default: "time".
#[builder(default, setter(into))]
pub sort: Option<Sort>,
/// Type of video. Valid values: "all", "upload", "archive", "highlight". Default: "all".
#[serde(rename = "type")]
#[builder(default, setter(into))]
pub type_: Option<VideoTypeFilter>,
}
/// Return Values for [Get Videos](super::get_videos)
///
/// [`get-videos`](https://dev.twitch.tv/docs/api/reference#get-videos)
#[derive(PartialEq, Deserialize, Debug, Clone)]
#[cfg_attr(feature = "deny_unknown_fields", serde(deny_unknown_fields))]
#[non_exhaustive]
pub struct Video {
/// Date when the video was created.
pub created_at: types::Timestamp,
/// Description of the video.
pub description: String,
/// Length of the video.
pub duration: String,
/// ID of the video.
pub id: types::VideoId,
/// Language of the video.
pub language: String,
/// Date when the video was published.
pub published_at: types::Timestamp,
/// Template URL for the thumbnail of the video.
pub thumbnail_url: String,
/// Title of the video.
pub title: String,
/// Type of video. Valid values: "upload", "archive", "highlight".
#[serde(rename = "type")]
pub type_: types::VideoType,
/// URL of the video.
pub url: String,
/// ID of the user who owns the video.
pub user_id: types::UserId,
/// Display name corresponding to user_id.
pub user_name: types::DisplayName,
/// Login of the user who owns the video.
pub user_login: types::UserName,
/// Number of times the video has been viewed.
pub view_count: i64,
/// Indicates whether the video is publicly viewable. Valid values: "public", "private".
pub viewable: types::VideoPrivacy,
}
impl Request for GetVideosRequest {
type Response = Vec<Video>;
const PATH: &'static str = "videos";
#[cfg(feature = "twitch_oauth2")]
const SCOPE: &'static [twitch_oauth2::Scope] = &[];
}
impl RequestGet for GetVideosRequest {}
impl helix::Paginated for GetVideosRequest {
fn set_pagination(&mut self, cursor: Option<helix::Cursor>) { self.after = cursor }
}
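// Illustrative note (not part of the original docs): `set_pagination` stores
// the cursor returned by one response into `after`, so re-sending the same
// request walks forward through a multi-page result set.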
#[test]
fn test_request() {
use helix::*;
let req = GetVideosRequest::builder()
.id(vec!["234482848".to_string()])
.build();
// From twitch docs
let data = br#"
{
"data": [{
"id": "234482848",
"user_id": "67955580",
"user_login": "chewiemelodies",
"user_name": "ChewieMelodies",
"title": "-",
"description": "",
"created_at": "2018-03-02T20:53:41Z",
"published_at": "2018-03-02T20:53:41Z",
"url": "https://www.twitch.tv/videos/234482848",
"thumbnail_url": "https://static-cdn.jtvnw.net/s3_vods/bebc8cba2926d1967418_chewiemelodies_27786761696_805342775/thumb/thumb0-%{width}x%{height}.jpg",
"viewable": "public",
"view_count": 142,
"language": "en",
"type": "archive",
"duration": "3h8m33s"
}],
"pagination":{"cursor":"eyJiIjpudWxsLCJhIjoiMTUwMzQ0MTc3NjQyNDQyMjAwMCJ9"}
}
"#
.to_vec();
let http_response = http::Response::builder().body(data).unwrap();
let uri = req.get_uri().unwrap();
assert_eq!(
uri.to_string(),
"https://api.twitch.tv/helix/videos?id=234482848"
);
dbg!(GetVideosRequest::parse_response(Some(req), &uri, http_response).unwrap());
}
| 39.438889 | 219 | 0.660516 |
79f3ac267f818eba692a728a8f24484a187e885c | 99 | pub type QuickLink = *mut ::core::ffi::c_void;
pub type ShareOperation = *mut ::core::ffi::c_void;
| 33 | 51 | 0.69697 |
e4e02b62aabaf69bec5b50c0fff20587c48af74c | 4,076 | // let () = msg_send! is a common pattern for objc
#![allow(clippy::let_unit_value)]
use super::window::WindowInner;
use crate::connection::ConnectionOps;
use crate::spawn::*;
use cocoa::appkit::{NSApp, NSApplication, NSApplicationActivationPolicyRegular};
use cocoa::base::{id, nil};
use core_foundation::date::CFAbsoluteTimeGetCurrent;
use core_foundation::runloop::*;
use objc::*;
use std::cell::RefCell;
use std::collections::HashMap;
use std::rc::Rc;
use std::sync::atomic::AtomicUsize;
pub struct Connection {
ns_app: id,
pub(crate) windows: RefCell<HashMap<usize, Rc<RefCell<WindowInner>>>>,
pub(crate) next_window_id: AtomicUsize,
}
impl Connection {
pub(crate) fn create_new() -> anyhow::Result<Self> {
// Ensure that the SPAWN_QUEUE is created; it will have nothing
// to run right now.
SPAWN_QUEUE.run();
unsafe {
let ns_app = NSApp();
ns_app.setActivationPolicy_(NSApplicationActivationPolicyRegular);
let conn = Self {
ns_app,
windows: RefCell::new(HashMap::new()),
next_window_id: AtomicUsize::new(1),
};
Ok(conn)
}
}
pub(crate) fn next_window_id(&self) -> usize {
self.next_window_id
.fetch_add(1, ::std::sync::atomic::Ordering::Relaxed)
}
pub(crate) fn window_by_id(&self, window_id: usize) -> Option<Rc<RefCell<WindowInner>>> {
self.windows.borrow().get(&window_id).map(Rc::clone)
}
pub(crate) fn with_window_inner<
R,
F: FnMut(&mut WindowInner) -> anyhow::Result<R> + Send + 'static,
>(
window_id: usize,
mut f: F,
) -> promise::Future<R>
where
R: Send + 'static,
{
let mut prom = promise::Promise::new();
let future = prom.get_future().unwrap();
promise::spawn::spawn_into_main_thread(async move {
if let Some(handle) = Connection::get().unwrap().window_by_id(window_id) {
let mut inner = handle.borrow_mut();
prom.result(f(&mut inner));
}
});
future
}
}
impl ConnectionOps for Connection {
fn terminate_message_loop(&self) {
unsafe {
let () = msg_send![NSApp(), stop: nil];
}
}
fn run_message_loop(&self) -> anyhow::Result<()> {
unsafe {
self.ns_app.run();
}
self.windows.borrow_mut().clear();
Ok(())
}
fn hide_application(&self) {
unsafe {
let () = msg_send![self.ns_app, hide: self.ns_app];
}
}
fn schedule_timer<F: FnMut() + 'static>(&self, interval: std::time::Duration, callback: F) {
let secs_f64 =
(interval.as_secs() as f64) + (f64::from(interval.subsec_nanos()) / 1_000_000_000_f64);
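        // e.g. (illustrative) Duration::from_millis(1500) -> 1.0 + 0.5 = 1.5 seconds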
let callback = Box::into_raw(Box::new(callback));
extern "C" fn timer_callback<F: FnMut()>(
_timer_ref: *mut __CFRunLoopTimer,
callback_ptr: *mut std::ffi::c_void,
) {
unsafe {
let callback: *mut F = callback_ptr as _;
(*callback)();
}
}
extern "C" fn release_callback<F: FnMut()>(info: *const std::ffi::c_void) {
let callback: Box<F> = unsafe { Box::from_raw(info as *mut F) };
drop(callback);
}
let timer_ref = unsafe {
CFRunLoopTimerCreate(
std::ptr::null(),
CFAbsoluteTimeGetCurrent() + secs_f64,
secs_f64,
0,
0,
timer_callback::<F>,
&mut CFRunLoopTimerContext {
copyDescription: None,
info: callback as _,
release: Some(release_callback::<F>),
retain: None,
version: 0,
},
)
};
unsafe {
CFRunLoopAddTimer(CFRunLoopGetCurrent(), timer_ref, kCFRunLoopCommonModes);
}
}
}
| 29.536232 | 99 | 0.542934 |
2fc96836de0a9ffa21640d0aba948e9456c84b26 | 1,591 | use std::env;
fn increase(number: i32) {
println!("{}", number + 1);
}
fn decrease(number: i32) {
println!("{}", number - 1);
}
fn help() {
println!("usage:
match_args <string>
Check whether given string is the answer.
match_args {{increase|decrease}} <integer>
Increase or decrease given integer by one.");
}
fn main() {
let args: Vec<String> = env::args().collect();
match args.len() {
        // no arguments passed
1 => {
println!("My name is 'match_args'. Try passing some arguments!");
},
        // one argument passed
2 => {
match args[1].parse() {
Ok(42) => println!("This is the answer!"),
_ => println!("This is not the answer."),
}
},
        // one command and one argument passed
3 => {
let cmd = &args[1];
let num = &args[2];
            // parse the number
let number: i32 = match num.parse() {
Ok(n) => {
n
},
Err(_) => {
println!("error: second argument not an integer");
help();
return;
},
};
            // parse the command
match &cmd[..] {
"increase" => increase(number),
"decrease" => decrease(number),
_ => {
println!("error: invalid command");
help();
},
}
},
        // all the other cases
_ => {
            // show a help message
help();
}
}
}
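// Example invocations (illustrative, assuming the binary is named `match_args`):
//   match_args              -> prints the greeting
//   match_args 42           -> "This is the answer!"
//   match_args increase 7   -> prints 8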
| 24.106061 | 77 | 0.395977 |
5b112b05f2e16d377014012d3feaf98b6f7945ed | 5,818 | // note this uses `smol`. you can use `tokio` or `async_std` or `async_io` if you prefer.
use anyhow::Context as _;
use std::collections::HashMap;
// extensions to the Privmsg type
use twitchchat::PrivmsgExt as _;
use twitchchat::{
messages::{Commands, Privmsg},
runner::{AsyncRunner, NotifyHandle, Status},
UserConfig,
};
fn main() -> anyhow::Result<()> {
// you'll need a user configuration
let user_config = get_user_config()?;
// and some channels to join
let channels = channels_to_join()?;
let start = std::time::Instant::now();
let mut bot = Bot::default()
.with_command("!hello", |args: Args| {
let output = format!("hello {}!", args.msg.name());
// We can 'reply' to this message using a writer + our output message
args.writer.reply(&args.msg, &output).unwrap();
})
.with_command("!uptime", move |args: Args| {
let output = format!("its been running for {:.2?}", start.elapsed());
// We can send a message back (without quoting the sender) using a writer + our output message
args.writer.say(&args.msg, &output).unwrap();
})
.with_command("!quit", move |args: Args| {
// because we're using sync stuff, turn async into sync with smol!
smol::block_on(async move {
// calling this will cause read_message() to eventually return Status::Quit
args.quit.notify().await
});
});
// run the bot in the executor
smol::block_on(async move { bot.run(&user_config, &channels).await })
}
struct Args<'a, 'b: 'a> {
msg: &'a Privmsg<'b>,
writer: &'a mut twitchchat::Writer,
quit: NotifyHandle,
}
trait Command: Send + Sync {
fn handle(&mut self, args: Args<'_, '_>);
}
impl<F> Command for F
where
F: Fn(Args<'_, '_>),
F: Send + Sync,
{
fn handle(&mut self, args: Args<'_, '_>) {
(self)(args)
}
}
#[derive(Default)]
struct Bot {
commands: HashMap<String, Box<dyn Command>>,
}
impl Bot {
// add this command to the bot
fn with_command(mut self, name: impl Into<String>, cmd: impl Command + 'static) -> Self {
self.commands.insert(name.into(), Box::new(cmd));
self
}
// run the bot until its done
async fn run(&mut self, user_config: &UserConfig, channels: &[String]) -> anyhow::Result<()> {
let connector = twitchchat::connector::smol::Connector::twitch();
let mut runner = AsyncRunner::connect(connector, user_config).await?;
println!("connecting, we are: {}", runner.identity.username());
for channel in channels {
println!("joining: {}", channel);
if let Err(err) = runner.join(channel).await {
eprintln!("error while joining '{}': {}", channel, err);
}
}
// if you store this somewhere, you can quit the bot gracefully
// let quit = runner.quit_handle();
println!("starting main loop");
self.main_loop(&mut runner).await
}
// the main loop of the bot
async fn main_loop(&mut self, runner: &mut AsyncRunner) -> anyhow::Result<()> {
// this is clonable, but we can just share it via &mut
        // this is a rate-limited writer
let mut writer = runner.writer();
// this is clonable, but using it consumes it.
// this is used to 'quit' the main loop
let quit = runner.quit_handle();
loop {
// this drives the internal state of the crate
match runner.next_message().await? {
                // if we get a Privmsg (you'll get a Commands enum for all messages received)
Status::Message(Commands::Privmsg(pm)) => {
// see if its a command and do stuff with it
if let Some(cmd) = Self::parse_command(pm.data()) {
if let Some(command) = self.commands.get_mut(cmd) {
println!("dispatching to: {}", cmd.escape_debug());
let args = Args {
msg: &pm,
writer: &mut writer,
quit: quit.clone(),
};
command.handle(args);
}
}
}
// stop if we're stopping
Status::Quit | Status::Eof => break,
// ignore the rest
Status::Message(..) => continue,
}
}
println!("end of main loop");
Ok(())
}
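    // Extracts the leading "!command" token from a message, if any.
    // For example, parse_command("!hello world") yields Some("!hello"),
    // while input without a leading '!' yields None.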
fn parse_command(input: &str) -> Option<&str> {
if !input.starts_with('!') {
return None;
}
input.splitn(2, ' ').next()
}
}
// some helpers for the demo
fn get_env_var(key: &str) -> anyhow::Result<String> {
std::env::var(key).with_context(|| format!("please set `{}`", key))
}
// channels can be either in the form of '#museun' or 'museun'. the crate will internally add the missing #
fn channels_to_join() -> anyhow::Result<Vec<String>> {
let channels = get_env_var("TWITCH_CHANNEL")?
.split(',')
.map(ToString::to_string)
.collect();
Ok(channels)
}
fn get_user_config() -> anyhow::Result<twitchchat::UserConfig> {
let name = get_env_var("TWITCH_NAME")?;
let token = get_env_var("TWITCH_TOKEN")?;
// you need a `UserConfig` to connect to Twitch
let config = UserConfig::builder()
// the name of the associated twitch account
.name(name)
// and the provided OAuth token
.token(token)
// and enable all of the advanced message signaling from Twitch
.enable_all_capabilities()
.build()?;
Ok(config)
}
| 33.245714 | 107 | 0.553283 |
877c0be00e1b89ff8dfec3fc656e92dad16f49a5 | 2,692 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! AddMergeExec adds a MergeExec node to merge plans
//! that have more than one partition into a single partition
//! when a node requires a single partition
use super::optimizer::PhysicalOptimizerRule;
use crate::{
error::Result,
physical_plan::{merge::MergeExec, Distribution},
};
use std::sync::Arc;
/// Introduces MergeExec
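///
/// A hedged usage sketch (building the `plan` and `config` values is elided):
///
/// ```ignore
/// let rule = AddMergeExec::new();
/// // children that emit multiple partitions get wrapped in a MergeExec when
/// // the parent requires Distribution::SinglePartition
/// let optimized = rule.optimize(plan, &config)?;
/// ```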
pub struct AddMergeExec {}
impl AddMergeExec {
#[allow(missing_docs)]
pub fn new() -> Self {
Self {}
}
}
impl PhysicalOptimizerRule for AddMergeExec {
fn optimize(
&self,
plan: Arc<dyn crate::physical_plan::ExecutionPlan>,
config: &crate::execution::context::ExecutionConfig,
) -> Result<Arc<dyn crate::physical_plan::ExecutionPlan>> {
if plan.children().is_empty() {
// leaf node, children cannot be replaced
Ok(plan.clone())
} else {
let children = plan
.children()
.iter()
.map(|child| self.optimize(child.clone(), config))
.collect::<Result<Vec<_>>>()?;
match plan.required_child_distribution() {
Distribution::UnspecifiedDistribution => plan.with_new_children(children),
Distribution::HashPartitioned(_) => plan.with_new_children(children),
Distribution::SinglePartition => plan.with_new_children(
children
.iter()
.map(|child| {
if child.output_partitioning().partition_count() == 1 {
child.clone()
} else {
Arc::new(MergeExec::new(child.clone()))
}
})
.collect(),
),
}
}
}
fn name(&self) -> &str {
"add_merge_exec"
}
}
| 35.421053 | 90 | 0.586924 |
9b62991f417f30e387a3a7a9307f143f6e9c2b61 | 3,747 | use mrklt::proof::ProofElem;
use wasm_bindgen::prelude::{wasm_bindgen, JsValue};
/// Accepts a list of blake2b-256 hashes packed into a single array. Returns the 32 byte root hash.
///
/// # Panics
///
/// Panics if the input value is not a multiple of 32.
/// Panics if number of leaves is 0.
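///
/// # Example
///
/// An illustrative sketch (not a compiled doctest; `leaf_hash_a` and
/// `leaf_hash_b` stand in for real 32-byte blake2b-256 digests):
///
/// ```ignore
/// let mut leaves = Vec::with_capacity(64);
/// leaves.extend_from_slice(&leaf_hash_a); // 32 bytes
/// leaves.extend_from_slice(&leaf_hash_b); // 32 bytes
/// let root = compute_root(&leaves); // Box<[u8]> of length 32
/// ```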
#[wasm_bindgen]
pub fn compute_root(leaves: &[u8]) -> Box<[u8]> {
let leaves = split32(leaves);
let root = mrklt::compute_root::<Blake2b256Spr>(&leaves);
Box::new(root)
}
/// # Panics
///
/// Panics if the packed "leaves" array length is not a multiple of 32.
/// Panics if leaf_index is >= leaves.len().
#[wasm_bindgen]
pub fn create_proof(leaf_index: usize, leaves: &[u8]) -> JsValue {
let leaves = split32(leaves);
let proof = mrklt::create_proof::<Blake2b256Spr>(leaf_index, &leaves);
JsValue::from_serde(&proof).unwrap()
}
/// Returns the 32 byte root hash.
///
/// # Panics
///
/// Panics if leaf has a length other than 32.
/// Panics if proof is not a valid json serialized list of proof elements.
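///
/// # Example
///
/// A round-trip sketch (illustrative; `packed_leaves` is a placeholder for a
/// byte array of packed 32-byte hashes):
///
/// ```ignore
/// let proof = create_proof(0, &packed_leaves);
/// let root = verify_proof(&packed_leaves[..32], proof);
/// // comparing `root` against a trusted root validates the inclusion proof
/// ```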
#[wasm_bindgen]
pub fn verify_proof(leaf: &[u8], proof: JsValue) -> Box<[u8]> {
let leaf = to_fixed(leaf);
let proof: Box<[ProofElem<[u8; 32]>]> = proof.into_serde().unwrap();
let root = mrklt::verify_proof::<Blake2b256Spr>(&leaf, &proof);
Box::new(root)
}
/// Compute root and create proofs for every leaf. This is much more efficient than calling
/// [`compute_root`] followed by [`create_proof`] for every element of the tree.
///
/// Accepts a list of blake2b-256 hashes packed into a single array. Returns the 32 byte root hash
/// and a list of proofs as a tuple.
///
/// # Panics
///
/// Panics if the input value is not a multiple of 32.
/// Panics if number of leaves is 0.
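///
/// # Example
///
/// Illustrative only (leaf packing elided):
///
/// ```ignore
/// // serialized as a JsValue tuple: (32-byte root, one proof per leaf)
/// let root_and_proofs = construct(&packed_leaves);
/// ```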
#[wasm_bindgen]
pub fn construct(leaves: &[u8]) -> JsValue {
let leaves = split32(leaves);
let a = mrklt::proof_map::HashCache::from_leaves::<Blake2b256Spr>(&leaves);
let proofs: Vec<Box<[ProofElem<[u8; 32]>]>> = leaves
.iter()
.enumerate()
.map(|(i, _)| a.create_proof(i))
.collect();
JsValue::from_serde(&(a.root(), proofs)).expect("serialization of return value failed")
}
/// Blake2b256 Second Preimage Resistant
///
/// Blake2b256 hash with leaves double-hashed to resist second preimage attacks.
enum Blake2b256Spr {}
impl mrklt::Merge for Blake2b256Spr {
type Hash = [u8; 32];
fn leaf(l: &[u8; 32]) -> [u8; 32] {
        // leaf is already hashed, but we hash it again to resist second preimage attacks
blake2b256(&[l])
}
fn merge(l: &[u8; 32], r: &[u8; 32]) -> [u8; 32] {
blake2b256(&[l, r])
}
}
fn blake2b256(bs: &[&[u8]]) -> [u8; 32] {
use blake2::{
digest::{Update, VariableOutput},
VarBlake2b,
};
let mut hasher = VarBlake2b::new(32).unwrap();
for b in bs {
hasher.update(&b);
}
let mut ret = [0u8; 32];
hasher.finalize_variable(|digest| ret = to_fixed(digest));
ret
}
/// Panics if bs is not 32 bytes long.
fn to_fixed(bs: &[u8]) -> [u8; 32] {
let mut ret = [0u8; 32];
ret.copy_from_slice(bs);
ret
}
// This could probably be done with unsafe and core::mem::transmute but let's wait for it to be a
// problem before making the optimization. Also, I haven't finished reading the nomicon so I don't
// have my unsafe license.
//
// The zero-allocations version would look something like this:
// ```
// fn split<'a>(bs: &'a [u8]) -> &'a [[u8; 32]];
// ```
// and would panic if the length is not a multiple of 32.
//
/// # Panics
///
/// Panics if length of input is not a multiple of 32.
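///
/// # Example
///
/// A minimal sketch:
///
/// ```ignore
/// let packed = [0u8; 64];
/// let chunks = split32(&packed);
/// assert_eq!(chunks.len(), 2); // two [u8; 32] values
/// ```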
fn split32(bs: &[u8]) -> Vec<[u8; 32]> {
assert!(
bs.len() % 32 == 0,
"invalid length for packed 32 byte elements"
);
bs.chunks(32).map(to_fixed).collect()
}
| 30.463415 | 99 | 0.632773 |
d99b915ccf6811a4e32b2df645cc9e3b6d24e1dd | 1,938 | extern crate clap;
use clap::{App, Arg, SubCommand};
extern crate chrono;
use chrono::{Datelike, Local};
use directories::ProjectDirs;
use std::fs;
use std::fs::OpenOptions;
use std::io::prelude::*;
use std::io::Error;
use std::path::PathBuf;
fn main() -> Result<(), Error> {
let matches = App::new("daily-bread")
.version("0.1.1")
.about("Emails your notes and commit messages to you at the end of the day")
.author("Avery Harnish")
.subcommand(
SubCommand::with_name("note")
.about("Appends a note to your daily note file")
.arg(
Arg::with_name("input")
.help("the note to take")
.index(1)
.required(true),
),
)
.subcommand(SubCommand::with_name("config").help("Sets your configuration"))
.get_matches();
if let Some(matches) = matches.subcommand_matches("note") {
let note = matches.value_of("input").unwrap();
let now = Local::now();
let year_name = now.year();
let year = now.format("%y");
let month = format!("{:02}", now.month());
let month_name = now.format("%B");
let day = format!("{:02}", now.day());
if let Some(proj_dirs) = ProjectDirs::from("com", "EverlastingBugstopper", "daily-bread") {
let mut dir = PathBuf::from(proj_dirs.config_dir());
dir.push(format!("{}/{}", year_name, month_name));
let mut path = PathBuf::from(dir.clone());
path.push(format!("{}-{}-{}.txt", month, day, year));
fs::create_dir_all(dir)?;
let mut file = OpenOptions::new().create(true).append(true).open(path)?;
write!(file, "{}\n", note)?;
}
} else if let Some(_) = matches.subcommand_matches("config") {
println!("Config command run!");
}
Ok(())
}
| 35.888889 | 99 | 0.5387 |
4a4a40e106deac1f4353ee6dca38781735d34822 | 29,767 | pub mod gk;
mod master_key;
mod side_tasks;
use crate::{
benchmark,
contracts::{
pink::{
group::GroupKeeper,
messaging::{WorkerPinkReport, WorkerPinkRequest},
},
ExecuteEnv, NativeContract,
},
pink::messaging::ContractInfo,
secret_channel::{PeelingReceiver, SecretReceiver},
types::{BlockInfo, OpaqueError, OpaqueQuery, OpaqueReply},
};
use anyhow::Result;
use core::fmt;
use log::info;
use std::collections::BTreeMap;
use crate::contracts;
use crate::pal;
use chain::pallet_registry::RegistryEvent;
use parity_scale_codec::{Decode, Encode};
pub use phactory_api::prpc::{GatekeeperRole, GatekeeperStatus};
use phala_crypto::{
aead,
ecdh::{self, EcdhKey},
sr25519::{Persistence, KDF},
};
use phala_mq::{
traits::MessageChannel, BadOrigin, BindTopic, ContractId, MessageDispatcher, MessageOrigin,
MessageSendQueue, SignedMessageChannel, TypedReceiver,
};
use phala_types::{
contract,
messaging::{
DispatchMasterKeyEvent, GatekeeperChange, GatekeeperLaunch, HeartbeatChallenge,
KeyDistribution, MiningReportEvent, NewGatekeeperEvent, SystemEvent, WorkerEvent,
},
EcdhPublicKey, MasterPublicKey, WorkerPublicKey,
};
use side_tasks::geo_probe;
use sp_core::{hashing::blake2_256, sr25519, Pair, U256};
pub type TransactionResult = Result<(), TransactionError>;
#[derive(Encode, Decode, Debug, Clone)]
pub enum TransactionError {
BadInput,
BadOrigin,
Other(String),
// general
InsufficientBalance,
NoBalance,
UnknownError,
BadContractId,
BadCommand,
SymbolExist,
AssetIdNotFound,
NotAssetOwner,
BadSecret,
BadMachineId,
FailedToSign,
BadDecimal,
DestroyNotAllowed,
// for pdiem
BadAccountInfo,
BadLedgerInfo,
BadTrustedStateData,
BadEpochChangedProofData,
BadTrustedState,
InvalidAccount,
BadTransactionWithProof,
FailedToVerify,
FailedToGetTransaction,
FailedToCalculateBalance,
BadChainId,
TransferringNotAllowed,
}
impl From<BadOrigin> for TransactionError {
fn from(_: BadOrigin) -> TransactionError {
TransactionError::BadOrigin
}
}
#[derive(Debug)]
struct BenchState {
start_block: chain::BlockNumber,
start_time: u64,
start_iter: u64,
duration: u32,
}
#[derive(Debug)]
enum MiningState {
Mining,
Paused,
}
#[derive(Debug)]
struct MiningInfo {
session_id: u32,
state: MiningState,
start_time: u64,
start_iter: u64,
}
// Minimal worker state machine that can be reused to replay in the GK.
// TODO: shrink size
#[derive(Debug)]
struct WorkerState {
pubkey: WorkerPublicKey,
hashed_id: U256,
registered: bool,
bench_state: Option<BenchState>,
mining_state: Option<MiningInfo>,
}
impl WorkerState {
pub fn new(pubkey: WorkerPublicKey) -> Self {
let raw_pubkey: &[u8] = pubkey.as_ref();
let pkh = blake2_256(raw_pubkey);
let hashed_id: U256 = pkh.into();
Self {
pubkey,
hashed_id,
registered: false,
bench_state: None,
mining_state: None,
}
}
pub fn process_event(
&mut self,
block: &BlockInfo,
event: &SystemEvent,
callback: &mut impl WorkerStateMachineCallback,
log_on: bool,
) {
match event {
SystemEvent::WorkerEvent(evt) => {
if evt.pubkey != self.pubkey {
return;
}
use MiningState::*;
use WorkerEvent::*;
if log_on {
info!("System::handle_event: {:?}", evt.event);
}
match evt.event {
Registered(_) => {
self.registered = true;
}
BenchStart { duration } => {
self.bench_state = Some(BenchState {
start_block: block.block_number,
start_time: block.now_ms,
start_iter: callback.bench_iterations(),
duration,
});
callback.bench_resume();
}
BenchScore(score) => {
if log_on {
info!("My benchmark score is {}", score);
}
}
MiningStart { session_id, .. } => {
self.mining_state = Some(MiningInfo {
session_id,
state: Mining,
start_time: block.now_ms,
start_iter: callback.bench_iterations(),
});
callback.bench_resume();
}
MiningStop => {
self.mining_state = None;
if self.need_pause() {
callback.bench_pause();
}
}
MiningEnterUnresponsive => {
if let Some(info) = &mut self.mining_state {
if let Mining = info.state {
if log_on {
info!("Enter paused");
}
info.state = Paused;
return;
}
}
if log_on {
error!(
"Unexpected event received: {:?}, mining_state= {:?}",
evt.event, self.mining_state
);
}
}
MiningExitUnresponsive => {
if let Some(info) = &mut self.mining_state {
if let Paused = info.state {
if log_on {
info!("Exit paused");
}
info.state = Mining;
return;
}
}
if log_on {
error!(
"Unexpected event received: {:?}, mining_state= {:?}",
evt.event, self.mining_state
);
}
}
}
}
SystemEvent::HeartbeatChallenge(seed_info) => {
self.handle_heartbeat_challenge(block, seed_info, callback, log_on);
}
};
}
fn handle_heartbeat_challenge(
&mut self,
block: &BlockInfo,
seed_info: &HeartbeatChallenge,
callback: &mut impl WorkerStateMachineCallback,
log_on: bool,
) {
if log_on {
info!(
"System::handle_heartbeat_challenge({}, {:?}), registered={:?}, mining_state={:?}",
block.block_number, seed_info, self.registered, self.mining_state
);
}
if !self.registered {
return;
}
let mining_state = if let Some(state) = &mut self.mining_state {
state
} else {
return;
};
if matches!(mining_state.state, MiningState::Paused) {
return;
}
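        // Probabilistic challenge selection: XOR the worker's hashed id with
        // the per-block random seed and treat the result as a uniform U256
        // draw; the worker "hits" the challenge when the draw is at most
        // online_target, so a larger online_target raises the hit rate.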
let x = self.hashed_id ^ seed_info.seed;
let online_hit = x <= seed_info.online_target;
// Push queue when necessary
if online_hit {
let iterations = callback.bench_iterations() - mining_state.start_iter;
callback.heartbeat(
mining_state.session_id,
block.block_number,
block.now_ms,
iterations,
);
}
}
fn need_pause(&self) -> bool {
self.bench_state.is_none() && self.mining_state.is_none()
}
fn on_block_processed(
&mut self,
block: &BlockInfo,
callback: &mut impl WorkerStateMachineCallback,
) {
// Handle registering benchmark report
if let Some(BenchState {
start_block,
start_time,
start_iter,
duration,
}) = self.bench_state
{
if block.block_number - start_block >= duration {
self.bench_state = None;
let iterations = callback.bench_iterations() - start_iter;
callback.bench_report(start_time, iterations);
if self.need_pause() {
callback.bench_pause();
}
}
}
}
}
trait WorkerStateMachineCallback {
fn bench_iterations(&self) -> u64 {
0
}
fn bench_resume(&mut self) {}
fn bench_pause(&mut self) {}
fn bench_report(&mut self, _start_time: u64, _iterations: u64) {}
fn heartbeat(
&mut self,
_session_id: u32,
_block_num: chain::BlockNumber,
_block_time: u64,
_iterations: u64,
) {
}
}
struct WorkerSMDelegate<'a>(&'a SignedMessageChannel);
impl WorkerStateMachineCallback for WorkerSMDelegate<'_> {
fn bench_iterations(&self) -> u64 {
benchmark::iteration_counter()
}
fn bench_resume(&mut self) {
benchmark::resume();
}
fn bench_pause(&mut self) {
benchmark::pause();
}
fn bench_report(&mut self, start_time: u64, iterations: u64) {
let report = RegistryEvent::BenchReport {
start_time,
iterations,
};
info!("Reporting benchmark: {:?}", report);
self.0.push_message(&report);
}
fn heartbeat(
&mut self,
session_id: u32,
challenge_block: chain::BlockNumber,
challenge_time: u64,
iterations: u64,
) {
let event = MiningReportEvent::Heartbeat {
session_id,
challenge_block,
challenge_time,
iterations,
};
info!("System: sending {:?}", event);
self.0.push_message(&event);
}
}
type ContractMap = BTreeMap<ContractId, Box<dyn contracts::Contract + Send>>;
pub struct System<Platform> {
platform: Platform,
// Configuration
sealing_path: String,
enable_geoprobing: bool,
geoip_city_db: String,
    // Messaging
egress: SignedMessageChannel,
system_events: TypedReceiver<SystemEvent>,
gatekeeper_launch_events: TypedReceiver<GatekeeperLaunch>,
gatekeeper_change_events: TypedReceiver<GatekeeperChange>,
key_distribution_events: TypedReceiver<KeyDistribution>,
pink_events: SecretReceiver<WorkerPinkRequest>,
// Worker
pub(crate) identity_key: sr25519::Pair,
pub(crate) ecdh_key: EcdhKey,
worker_state: WorkerState,
// Gatekeeper
master_key: Option<sr25519::Pair>,
pub(crate) gatekeeper: Option<gk::Gatekeeper<SignedMessageChannel>>,
pub(crate) contracts: ContractMap,
contract_groups: GroupKeeper,
}
impl<Platform: pal::Platform> System<Platform> {
pub fn new(
platform: Platform,
sealing_path: String,
enable_geoprobing: bool,
geoip_city_db: String,
identity_key: sr25519::Pair,
ecdh_key: EcdhKey,
send_mq: &MessageSendQueue,
recv_mq: &mut MessageDispatcher,
contracts: ContractMap,
) -> Self {
let pubkey = identity_key.clone().public();
let sender = MessageOrigin::Worker(pubkey);
let master_key = master_key::try_unseal(sealing_path.clone(), &identity_key, &platform);
System {
platform,
sealing_path,
enable_geoprobing,
geoip_city_db,
egress: send_mq.channel(sender, identity_key.clone()),
system_events: recv_mq.subscribe_bound(),
gatekeeper_launch_events: recv_mq.subscribe_bound(),
gatekeeper_change_events: recv_mq.subscribe_bound(),
key_distribution_events: recv_mq.subscribe_bound(),
pink_events: SecretReceiver::new_secret(
recv_mq.subscribe(WorkerPinkRequest::topic()).into(),
ecdh_key.clone(),
),
identity_key,
ecdh_key,
worker_state: WorkerState::new(pubkey),
master_key,
gatekeeper: None,
contracts,
contract_groups: Default::default(),
}
}
pub fn handle_query(
&mut self,
origin: Option<&chain::AccountId>,
contract_id: &ContractId,
req: OpaqueQuery,
) -> Result<OpaqueReply, OpaqueError> {
let contract = self
.contracts
.get_mut(contract_id)
.ok_or(OpaqueError::ContractNotFound)?;
let mut context = contracts::QueryContext {
contract_groups: &mut self.contract_groups,
};
contract.handle_query(origin, req, &mut context)
}
pub fn process_messages(&mut self, block: &mut BlockInfo) -> anyhow::Result<()> {
if self.enable_geoprobing {
geo_probe::process_block(
block.block_number,
&self.egress,
block.side_task_man,
&self.identity_key,
self.geoip_city_db.clone(),
);
}
loop {
let ok = phala_mq::select_ignore_errors! {
(event, origin) = self.system_events => {
if !origin.is_pallet() {
error!("Invalid SystemEvent sender: {:?}", origin);
continue;
}
self.process_system_event(block, &event)?;
},
(event, origin) = self.gatekeeper_launch_events => {
self.process_gatekeeper_launch_event(block, origin, event);
},
(event, origin) = self.gatekeeper_change_events => {
self.process_gatekeeper_change_event(block, origin, event);
},
(event, origin) = self.key_distribution_events => {
self.process_key_distribution_event(block, origin, event);
},
(event, origin) = self.pink_events => {
self.process_pink_event(block, origin, event);
},
};
if ok.is_none() {
// All messages processed
break;
}
}
self.worker_state
.on_block_processed(block, &mut WorkerSMDelegate(&self.egress));
if let Some(gatekeeper) = &mut self.gatekeeper {
gatekeeper.process_messages(block);
gatekeeper.emit_random_number(block.block_number);
}
let mut env = ExecuteEnv {
block: block,
contract_groups: &mut self.contract_groups,
};
for contract in self.contracts.values_mut() {
contract.process_messages(&mut env);
}
Ok(())
}
fn process_system_event(&mut self, block: &BlockInfo, event: &SystemEvent) -> Result<()> {
self.worker_state
.process_event(block, event, &mut WorkerSMDelegate(&self.egress), true);
Ok(())
}
fn set_master_key(&mut self, master_key: sr25519::Pair, need_restart: bool) {
if self.master_key.is_none() {
master_key::seal(
self.sealing_path.clone(),
&master_key,
&self.identity_key,
&self.platform,
);
self.master_key = Some(master_key);
if need_restart {
panic!("Received master key, please restart pRuntime and pherry");
}
} else if let Some(my_master_key) = &self.master_key {
// TODO.shelven: remove this assertion after we enable master key rotation
assert_eq!(my_master_key.to_raw_vec(), master_key.to_raw_vec());
}
}
fn init_gatekeeper(&mut self, block: &mut BlockInfo) {
assert!(
self.master_key.is_some(),
"Gatekeeper initialization without master key"
);
assert!(
self.gatekeeper.is_none(),
"Duplicated gatekeeper initialization"
);
let gatekeeper = gk::Gatekeeper::new(
self.master_key
.as_ref()
.expect("checked master key above; qed.")
.clone(),
block.recv_mq,
block.send_mq.channel(
MessageOrigin::Gatekeeper,
self.master_key
.as_ref()
.expect("checked master key above; qed.")
.clone(),
),
);
self.gatekeeper = Some(gatekeeper);
}
fn process_gatekeeper_launch_event(
&mut self,
block: &mut BlockInfo,
origin: MessageOrigin,
event: GatekeeperLaunch,
) {
info!("Incoming gatekeeper launch event: {:?}", event);
match event {
GatekeeperLaunch::FirstGatekeeper(new_gatekeeper_event) => {
self.process_first_gatekeeper_event(block, origin, new_gatekeeper_event)
}
GatekeeperLaunch::MasterPubkeyOnChain(_) => {
info!(
"Gatekeeper launches on chain in block {}",
block.block_number
);
if let Some(gatekeeper) = &mut self.gatekeeper {
gatekeeper.master_pubkey_uploaded();
}
}
}
}
/// Generate the master key if this is the first gatekeeper
fn process_first_gatekeeper_event(
&mut self,
block: &mut BlockInfo,
origin: MessageOrigin,
event: NewGatekeeperEvent,
) {
if !origin.is_pallet() {
error!("Invalid origin {:?} sent a {:?}", origin, event);
return;
}
// double check the first gatekeeper is valid on chain
if !chain_state::is_gatekeeper(&event.pubkey, block.storage) {
error!(
"Fatal error: Invalid first gatekeeper registration {:?}",
event
);
panic!("System state poisoned");
}
let my_pubkey = self.identity_key.public();
// if the first gatekeeper reboots, it will possess the master key,
// and should not re-generate it
if my_pubkey == event.pubkey && self.master_key.is_none() {
info!("Gatekeeper: generate master key as the first gatekeeper");
// generate master key as the first gatekeeper
// no need to restart
let master_key = crate::new_sr25519_key();
self.set_master_key(master_key.clone(), false);
// upload the master key on chain via worker egress
info!(
"Gatekeeper: upload master key {} on chain",
hex::encode(master_key.public())
);
let master_pubkey = RegistryEvent::MasterPubkey {
master_pubkey: master_key.public(),
};
self.egress.push_message(&master_pubkey);
}
if self.master_key.is_some() {
info!("Init gatekeeper in block {}", block.block_number);
self.init_gatekeeper(block);
}
if my_pubkey == event.pubkey {
self.gatekeeper
.as_mut()
.expect("gatekeeper must be initializaed here; qed.")
.register_on_chain();
}
}
fn process_gatekeeper_change_event(
&mut self,
block: &mut BlockInfo,
origin: MessageOrigin,
event: GatekeeperChange,
) {
info!("Incoming gatekeeper change event: {:?}", event);
match event {
GatekeeperChange::GatekeeperRegistered(new_gatekeeper_event) => {
self.process_new_gatekeeper_event(block, origin, new_gatekeeper_event)
}
}
}
    /// Share the master key with the newly-registered gatekeeper
/// Tick the state if the registered gatekeeper is this worker
fn process_new_gatekeeper_event(
&mut self,
block: &mut BlockInfo,
origin: MessageOrigin,
event: NewGatekeeperEvent,
) {
if !origin.is_pallet() {
error!("Invalid origin {:?} sent a {:?}", origin, event);
return;
}
// double check the registered gatekeeper is valid on chain
if !chain_state::is_gatekeeper(&event.pubkey, block.storage) {
error!(
"Fatal error: Invalid first gatekeeper registration {:?}",
event
);
panic!("System state poisoned");
}
if let Some(gatekeeper) = &mut self.gatekeeper {
gatekeeper.share_master_key(&event.pubkey, &event.ecdh_pubkey, block.block_number);
let my_pubkey = self.identity_key.public();
if my_pubkey == event.pubkey {
gatekeeper.register_on_chain();
}
}
}
fn process_key_distribution_event(
&mut self,
_block: &mut BlockInfo,
origin: MessageOrigin,
event: KeyDistribution,
) {
info!("Incoming key distribution event: {:?}", event);
match event {
KeyDistribution::MasterKeyDistribution(dispatch_master_key_event) => {
self.process_master_key_distribution(origin, dispatch_master_key_event)
}
}
}
fn process_pink_event(
&mut self,
block: &mut BlockInfo,
origin: MessageOrigin,
event: WorkerPinkRequest,
) {
match event {
WorkerPinkRequest::Instantiate {
group_id,
worker,
nonce,
owner,
wasm_bin,
input_data,
salt,
key,
} => {
if worker != self.worker_state.pubkey {
return;
}
info!(
"Incoming pink instantiate event: origin={}, onwer={}, nonce={}, contract_size={}",
origin,
owner,
hex::encode(&nonce),
wasm_bin.len(),
);
if !origin.is_gatekeeper() && !origin.is_pallet() {
error!("Attempt to instantiate a pink instance from out of GK!");
return;
}
let result: Result<(ContractId, EcdhPublicKey), String> = {
let owner = owner.clone();
let contracts = &mut self.contracts;
let contract_groups = &mut self.contract_groups;
let group_id = group_id.clone();
(move || {
let contract_key = sr25519::Pair::restore_from_secret_key(&key);
let ecdh_key = contract_key.derive_ecdh_key().unwrap();
let result = contract_groups
.instantiate_contract(group_id, owner, wasm_bin, input_data, salt);
match result {
Err(err) => Err(err.to_string()),
Ok(pink) => {
let address = pink.id();
let pubkey = EcdhPublicKey(ecdh_key.public());
install_contract(contracts, pink, contract_key, ecdh_key, block);
Ok((address, pubkey))
}
}
})()
};
match &result {
Err(err) => {
error!(
"Instantiate contract error: {}, owner: {:?}, nonce: {}",
err,
owner,
hex::encode(&nonce)
);
}
Ok(addr) => {
info!(
"Contract instantiated: owner: {:?}, nonce: {}, address: {}, group: {}",
owner,
hex::encode(&nonce),
hex::encode(addr.0),
hex::encode(&group_id),
);
}
}
let message = WorkerPinkReport::InstantiateStatus {
nonce,
owner: phala_types::messaging::AccountId(owner.into()),
result: result.map(|(id, pubkey)| ContractInfo {
id,
group_id,
pubkey,
}),
};
info!("pink instantiate status: {:?}", message);
self.egress.push_message(&message);
}
}
}
/// Process encrypted master key from mq
fn process_master_key_distribution(
&mut self,
origin: MessageOrigin,
event: DispatchMasterKeyEvent,
) {
if !origin.is_gatekeeper() {
error!("Invalid origin {:?} sent a {:?}", origin, event);
return;
};
let my_pubkey = self.identity_key.public();
if my_pubkey == event.dest {
let my_ecdh_key = self
.identity_key
.derive_ecdh_key()
.expect("Should never failed with valid identity key; qed.");
let secret = ecdh::agree(&my_ecdh_key, &event.ecdh_pubkey.0)
.expect("Should never failed with valid ecdh key; qed.");
let mut master_key_buff = event.encrypted_master_key.clone();
let master_key = aead::decrypt(&event.iv, &secret, &mut master_key_buff[..])
.expect("Failed to decrypt dispatched master key");
let master_pair = sr25519::Pair::from_seed_slice(master_key)
.expect("Master key seed must be correct; qed.");
info!("Gatekeeper: successfully decrypt received master key");
self.set_master_key(master_pair, true);
}
}
pub fn is_registered(&self) -> bool {
self.worker_state.registered
}
pub fn gatekeeper_status(&self) -> GatekeeperStatus {
let active = match &self.gatekeeper {
Some(gk) => gk.registered_on_chain(),
None => false,
};
let has_key = self.master_key.is_some();
let role = match (has_key, active) {
(true, true) => GatekeeperRole::Active,
(true, false) => GatekeeperRole::Dummy,
_ => GatekeeperRole::None,
};
let master_public_key = self
.master_key
.as_ref()
.map(|k| hex::encode(&k.public()))
.unwrap_or_default();
GatekeeperStatus {
role: role.into(),
master_public_key,
}
}
}
pub fn install_contract<Contract>(
contracts: &mut ContractMap,
contract: Contract,
contract_key: sr25519::Pair,
ecdh_key: EcdhKey,
block: &mut BlockInfo,
) where
Contract: NativeContract + Send + 'static,
<Contract as NativeContract>::Cmd: Send,
{
let contract_id = contract.id();
let sender = MessageOrigin::Contract(contract_id);
let mq = block.send_mq.channel(sender, contract_key);
let contract_key = ecdh_key.clone();
let cmd_mq = PeelingReceiver::new_secret(
block
.recv_mq
.subscribe(contract::command_topic(contract_id))
.into(),
contract_key,
);
let wrapped = Box::new(contracts::NativeCompatContract::new(
contract,
mq,
cmd_mq,
ecdh_key.clone(),
));
contracts.insert(contract_id, wrapped);
}
#[derive(Encode, Decode, Debug)]
pub enum Error {
NotAuthorized,
TxHashNotFound,
Other(String),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::NotAuthorized => write!(f, "not authorized"),
Error::TxHashNotFound => write!(f, "transaction hash not found"),
Error::Other(e) => write!(f, "{}", e),
}
}
}
pub mod chain_state {
use super::*;
use crate::light_validation::utils::storage_prefix;
use crate::storage::Storage;
use parity_scale_codec::Decode;
pub fn is_gatekeeper(pubkey: &WorkerPublicKey, chain_storage: &Storage) -> bool {
let key = storage_prefix("PhalaRegistry", "Gatekeeper");
let gatekeepers = chain_storage
.get(&key)
.map(|v| {
Vec::<WorkerPublicKey>::decode(&mut &v[..])
.expect("Decode value of Gatekeeper Failed. (This should not happen)")
})
.unwrap_or_default();
gatekeepers.contains(pubkey)
}
#[allow(dead_code)]
pub fn read_master_pubkey(chain_storage: &Storage) -> Option<MasterPublicKey> {
let key = storage_prefix("PhalaRegistry", "GatekeeperMasterPubkey");
chain_storage
.get(&key)
.map(|v| {
Some(
MasterPublicKey::decode(&mut &v[..])
.expect("Decode value of MasterPubkey Failed. (This should not happen)"),
)
})
.unwrap_or(None)
}
}
| 32.53224 | 103 | 0.518628 |
905056155481014361b8ea34b3b8d7173c9a3694 | 27,999 | //! Radio driver, Bluetooth Low Energy, NRF52
//!
//! The generic radio configuration i.e., not specific to Bluetooth are functions and similar which
//! do not start with `ble`. Moreover, Bluetooth Low Energy specific radio configuration
//! starts with `ble`
//!
//! For more readability the Bluetooth specific configuration may be moved to separate trait
//!
//! ### Author
//! * Niklas Adolfsson <[email protected]>
//! * Date: July 18, 2017
//!
//! ### Packet Configuration
//! ```txt
//! +----------+------+--------+----+--------+----+---------+-----+
//! | Preamble | Base | Prefix | S0 | Length | S1 | Payload | CRC |
//! +----------+------+--------+----+--------+----+---------+-----+
//! ```
//!
//! * Preamble - 1 byte
//!
//! * Base and prefix together form the access address
//!
//! * S0, an optional parameter that is configured to indicate how many bytes of
//! the payload is the PDU Type. Configured as 1 byte!
//!
//! * Length, an optional parameter that is configured to indicate how many bits of the
//! payload is the length field. Configured as 8 bits!
//!
//! * S1, Not used
//!
//! * Payload - 2 to 255 bytes
//!
//! * CRC - 3 bytes
use core::cell::Cell;
use core::convert::TryFrom;
use kernel::common::cells::OptionalCell;
use kernel::common::cells::TakeCell;
use kernel::common::registers::{register_bitfields, ReadOnly, ReadWrite, WriteOnly};
use kernel::common::StaticRef;
use kernel::hil::ble_advertising;
use kernel::hil::ble_advertising::RadioChannel;
use kernel::ReturnCode;
use nrf5x::constants::TxPower;
const RADIO_BASE: StaticRef<RadioRegisters> =
unsafe { StaticRef::new(0x40001000 as *const RadioRegisters) };
#[repr(C)]
struct RadioRegisters {
/// Enable Radio in TX mode
/// - Address: 0x000 - 0x004
task_txen: WriteOnly<u32, Task::Register>,
/// Enable Radio in RX mode
/// - Address: 0x004 - 0x008
task_rxen: WriteOnly<u32, Task::Register>,
/// Start Radio
/// - Address: 0x008 - 0x00c
task_start: WriteOnly<u32, Task::Register>,
/// Stop Radio
/// - Address: 0x00c - 0x010
task_stop: WriteOnly<u32, Task::Register>,
/// Disable Radio
/// - Address: 0x010 - 0x014
task_disable: WriteOnly<u32, Task::Register>,
/// Start the RSSI and take one single sample of the receive signal strength
/// - Address: 0x014- 0x018
task_rssistart: WriteOnly<u32, Task::Register>,
/// Stop the RSSI measurement
/// - Address: 0x018 - 0x01c
task_rssistop: WriteOnly<u32, Task::Register>,
/// Start the bit counter
/// - Address: 0x01c - 0x020
task_bcstart: WriteOnly<u32, Task::Register>,
/// Stop the bit counter
/// - Address: 0x020 - 0x024
task_bcstop: WriteOnly<u32, Task::Register>,
/// Reserved
_reserved1: [u32; 55],
/// Radio has ramped up and is ready to be started
/// - Address: 0x100 - 0x104
event_ready: ReadWrite<u32, Event::Register>,
/// Address sent or received
/// - Address: 0x104 - 0x108
event_address: ReadWrite<u32, Event::Register>,
/// Packet payload sent or received
/// - Address: 0x108 - 0x10c
event_payload: ReadWrite<u32, Event::Register>,
/// Packet sent or received
/// - Address: 0x10c - 0x110
event_end: ReadWrite<u32, Event::Register>,
/// Radio has been disabled
/// - Address: 0x110 - 0x114
event_disabled: ReadWrite<u32, Event::Register>,
/// A device address match occurred on the last received packet
/// - Address: 0x114 - 0x118
event_devmatch: ReadWrite<u32>,
/// No device address match occurred on the last received packet
/// - Address: 0x118 - 0x11c
event_devmiss: ReadWrite<u32, Event::Register>,
/// Sampling of receive signal strength complete
/// - Address: 0x11c - 0x120
event_rssiend: ReadWrite<u32, Event::Register>,
/// Reserved
_reserved2: [u32; 2],
/// Bit counter reached bit count value
/// - Address: 0x128 - 0x12c
event_bcmatch: ReadWrite<u32, Event::Register>,
/// Reserved
_reserved3: [u32; 1],
/// Packet received with CRC ok
/// - Address: 0x130 - 0x134
event_crcok: ReadWrite<u32, Event::Register>,
/// Packet received with CRC error
/// - Address: 0x134 - 0x138
    event_crcerror: ReadWrite<u32, Event::Register>,
/// Reserved
_reserved4: [u32; 50],
/// Shortcut register
/// - Address: 0x200 - 0x204
shorts: ReadWrite<u32, Shortcut::Register>,
/// Reserved
_reserved5: [u32; 64],
/// Enable interrupt
/// - Address: 0x304 - 0x308
intenset: ReadWrite<u32, Interrupt::Register>,
/// Disable interrupt
/// - Address: 0x308 - 0x30c
intenclr: ReadWrite<u32, Interrupt::Register>,
/// Reserved
_reserved6: [u32; 61],
/// CRC status
/// - Address: 0x400 - 0x404
crcstatus: ReadOnly<u32, Event::Register>,
/// Reserved
_reserved7: [u32; 1],
/// Received address
/// - Address: 0x408 - 0x40c
rxmatch: ReadOnly<u32, ReceiveMatch::Register>,
/// CRC field of previously received packet
/// - Address: 0x40c - 0x410
rxcrc: ReadOnly<u32, ReceiveCrc::Register>,
/// Device address match index
/// - Address: 0x410 - 0x414
dai: ReadOnly<u32, DeviceAddressIndex::Register>,
/// Reserved
_reserved8: [u32; 60],
/// Packet pointer
/// - Address: 0x504 - 0x508
packetptr: ReadWrite<u32, PacketPointer::Register>,
/// Frequency
/// - Address: 0x508 - 0x50c
frequency: ReadWrite<u32, Frequency::Register>,
/// Output power
/// - Address: 0x50c - 0x510
txpower: ReadWrite<u32, TransmitPower::Register>,
/// Data rate and modulation
/// - Address: 0x510 - 0x514
mode: ReadWrite<u32, Mode::Register>,
/// Packet configuration register 0
/// - Address 0x514 - 0x518
pcnf0: ReadWrite<u32, PacketConfiguration0::Register>,
/// Packet configuration register 1
/// - Address: 0x518 - 0x51c
pcnf1: ReadWrite<u32, PacketConfiguration1::Register>,
/// Base address 0
/// - Address: 0x51c - 0x520
base0: ReadWrite<u32, BaseAddress::Register>,
/// Base address 1
/// - Address: 0x520 - 0x524
base1: ReadWrite<u32, BaseAddress::Register>,
/// Prefix bytes for logical addresses 0-3
/// - Address: 0x524 - 0x528
prefix0: ReadWrite<u32, Prefix0::Register>,
/// Prefix bytes for logical addresses 4-7
/// - Address: 0x528 - 0x52c
prefix1: ReadWrite<u32, Prefix1::Register>,
/// Transmit address select
/// - Address: 0x52c - 0x530
txaddress: ReadWrite<u32, TransmitAddress::Register>,
/// Receive address select
/// - Address: 0x530 - 0x534
rxaddresses: ReadWrite<u32, ReceiveAddresses::Register>,
/// CRC configuration
/// - Address: 0x534 - 0x538
crccnf: ReadWrite<u32, CrcConfiguration::Register>,
/// CRC polynomial
/// - Address: 0x538 - 0x53c
crcpoly: ReadWrite<u32, CrcPolynomial::Register>,
/// CRC initial value
/// - Address: 0x53c - 0x540
crcinit: ReadWrite<u32, CrcInitialValue::Register>,
/// Reserved
_reserved9: [u32; 1],
/// Interframe spacing in microseconds
/// - Address: 0x544 - 0x548
tifs: ReadWrite<u32, InterFrameSpacing::Register>,
/// RSSI sample
/// - Address: 0x548 - 0x54c
rssisample: ReadWrite<u32, RssiSample::Register>,
/// Reserved
_reserved10: [u32; 1],
/// Current radio state
/// - Address: 0x550 - 0x554
state: ReadOnly<u32, State::Register>,
/// Data whitening initial value
/// - Address: 0x554 - 0x558
datawhiteiv: ReadWrite<u32, DataWhiteIv::Register>,
/// Reserved
_reserved11: [u32; 2],
/// Bit counter compare
/// - Address: 0x560 - 0x564
bcc: ReadWrite<u32, BitCounterCompare::Register>,
/// Reserved
_reserved12: [u32; 39],
/// Device address base segments
/// - Address: 0x600 - 0x620
dab: [ReadWrite<u32, DeviceAddressBase::Register>; 8],
/// Device address prefix
/// - Address: 0x620 - 0x640
dap: [ReadWrite<u32, DeviceAddressPrefix::Register>; 8],
/// Device address match configuration
/// - Address: 0x640 - 0x644
dacnf: ReadWrite<u32, DeviceAddressMatch::Register>,
/// Reserved
_reserved13: [u32; 3],
/// Radio mode configuration register
/// - Address: 0x650 - 0x654
modecnf0: ReadWrite<u32, RadioModeConfig::Register>,
/// Reserved
_reserved14: [u32; 618],
/// Peripheral power control
/// - Address: 0xFFC - 0x1000
power: ReadWrite<u32, Task::Register>,
}
register_bitfields! [u32,
/// Task register
Task [
/// Enable task
ENABLE OFFSET(0) NUMBITS(1)
],
/// Event register
Event [
/// Ready event
READY OFFSET(0) NUMBITS(1)
],
/// Shortcut register
Shortcut [
/// Shortcut between READY event and START task
READY_START OFFSET(0) NUMBITS(1),
/// Shortcut between END event and DISABLE task
END_DISABLE OFFSET(1) NUMBITS(1),
/// Shortcut between DISABLED event and TXEN task
DISABLED_TXEN OFFSET(2) NUMBITS(1),
/// Shortcut between DISABLED event and RXEN task
DISABLED_RXEN OFFSET(3) NUMBITS(1),
/// Shortcut between ADDRESS event and RSSISTART task
ADDRESS_RSSISTART OFFSET(4) NUMBITS(1),
/// Shortcut between END event and START task
END_START OFFSET(5) NUMBITS(1),
/// Shortcut between ADDRESS event and BCSTART task
ADDRESS_BCSTART OFFSET(6) NUMBITS(1),
/// Shortcut between DISABLED event and RSSISTOP task
DISABLED_RSSISTOP OFFSET(8) NUMBITS(1)
],
/// Interrupt register
Interrupt [
/// READY event
READY OFFSET(0) NUMBITS(1),
/// ADDRESS event
ADDRESS OFFSET(1) NUMBITS(1),
/// PAYLOAD event
PAYLOAD OFFSET(2) NUMBITS(1),
/// END event
END OFFSET(3) NUMBITS(1),
/// DISABLED event
DISABLED OFFSET(4) NUMBITS(1),
/// DEVMATCH event
DEVMATCH OFFSET(5) NUMBITS(1),
/// DEVMISS event
DEVMISS OFFSET(6) NUMBITS(1),
/// RSSIEND event
RSSIEND OFFSET(7) NUMBITS(1),
/// BCMATCH event
BCMATCH OFFSET(10) NUMBITS(1),
/// CRCOK event
CRCOK OFFSET(12) NUMBITS(1),
/// CRCERROR event
CRCERROR OFFSET(13) NUMBITS(1)
],
/// Receive match register
ReceiveMatch [
/// Logical address of which previous packet was received
MATCH OFFSET(0) NUMBITS(3)
],
/// Received CRC register
ReceiveCrc [
/// CRC field of previously received packet
CRC OFFSET(0) NUMBITS(24)
],
/// Device address match index register
DeviceAddressIndex [
/// Device address match index
/// Index (n) of device address, see DAB\[n\] and DAP\[n\], that got an
/// address match
INDEX OFFSET(0) NUMBITS(3)
],
/// Packet pointer register
PacketPointer [
/// Packet address to be used for the next transmission or reception. When transmitting, the packet pointed to by this
/// address will be transmitted and when receiving, the received packet will be written to this address. This address is a byte
/// aligned ram address.
POINTER OFFSET(0) NUMBITS(32)
],
/// Frequency register
Frequency [
/// Radio channel frequency
/// Frequency = 2400 + FREQUENCY (MHz)
FREQUENCY OFFSET(0) NUMBITS(7) [],
/// Channel map selection.
/// Channel map between 2400 MHZ .. 2500 MHZ
MAP OFFSET(8) NUMBITS(1) [
DEFAULT = 0,
LOW = 1
]
],
/// Transmitting power register
TransmitPower [
/// Radio output power
POWER OFFSET(0) NUMBITS(8) [
POS4DBM = 4,
POS3DBM = 3,
ODBM = 0,
NEG4DBM = 0xfc,
NEG8DBM = 0xf8,
NEG12DBM = 0xf4,
NEG16DBM = 0xf0,
NEG20DBM = 0xec,
NEG40DBM = 0xd8
]
],
/// Data rate and modulation register
Mode [
/// Radio data rate and modulation setting.
/// The radio supports Frequency-shift Keying (FSK) modulation
MODE OFFSET(0) NUMBITS(4) [
NRF_1MBIT = 0,
NRF_2MBIT = 1,
NRF_250KBIT = 2,
BLE_1MBIT = 3
]
],
/// Packet configuration register 0
PacketConfiguration0 [
/// Length on air of LENGTH field in number of bits
LFLEN OFFSET(0) NUMBITS(4) [],
/// Length on air of S0 field in number of bytes
S0LEN OFFSET(8) NUMBITS(1) [],
/// Length on air of S1 field in number of bits.
S1LEN OFFSET(16) NUMBITS(4) [],
/// Include or exclude S1 field in RAM
S1INCL OFFSET(20) NUMBITS(1) [
AUTOMATIC = 0,
INCLUDE = 1
],
/// Length of preamble on air. Decision point: TASKS_START task
PLEN OFFSET(24) NUMBITS(1) [
EIGHT = 0,
SIXTEEN = 1
]
],
/// Packet configuration register 1
PacketConfiguration1 [
/// Maximum length of packet payload
MAXLEN OFFSET(0) NUMBITS(8) [],
/// Static length in number of bytes
STATLEN OFFSET(8) NUMBITS(8) [],
/// Base address length in number of bytes
BALEN OFFSET(16) NUMBITS(3) [],
/// On air endianness
ENDIAN OFFSET(24) NUMBITS(1) [
LITTLE = 0,
BIG = 1
],
/// Enable or disable packet whitening
WHITEEN OFFSET(25) NUMBITS(1) [
DISABLED = 0,
ENABLED = 1
]
],
/// Radio base address register
BaseAddress [
/// BASE0 or BASE1
BASE OFFSET(0) NUMBITS(32)
],
/// Radio prefix0 registers
Prefix0 [
/// Address prefix 0
AP0 OFFSET(0) NUMBITS(8),
/// Address prefix 1
AP1 OFFSET(8) NUMBITS(8),
/// Address prefix 2
AP2 OFFSET(16) NUMBITS(8),
/// Address prefix 3
AP3 OFFSET(24) NUMBITS(8)
],
/// Radio prefix0 registers
Prefix1 [
/// Address prefix 4
AP4 OFFSET(0) NUMBITS(8),
/// Address prefix 5
AP5 OFFSET(8) NUMBITS(8),
/// Address prefix 6
AP6 OFFSET(16) NUMBITS(8),
/// Address prefix 7
AP7 OFFSET(24) NUMBITS(8)
],
/// Transmit address register
TransmitAddress [
/// Logical address to be used when transmitting a packet
ADDRESS OFFSET(0) NUMBITS(3)
],
/// Receive addresses register
ReceiveAddresses [
/// Enable or disable reception on logical address 0-7
ADDRESS OFFSET(0) NUMBITS(8)
],
/// CRC configuration register
CrcConfiguration [
/// CRC length in bytes
LEN OFFSET(0) NUMBITS(2) [
DISABLED = 0,
ONE = 1,
TWO = 2,
THREE = 3
],
/// Include or exclude packet field from CRC calculation
SKIPADDR OFFSET(8) NUMBITS(1) [
INCLUDE = 0,
EXCLUDE = 1
]
],
/// CRC polynomial register
CrcPolynomial [
/// CRC polynomial
CRCPOLY OFFSET(0) NUMBITS(24)
],
/// CRC initial value register
CrcInitialValue [
/// Initial value for CRC calculation
CRCINIT OFFSET(0) NUMBITS(24)
],
/// Inter Frame Spacing in us register
InterFrameSpacing [
/// Inter Frame Spacing in us
        /// Inter frame space is the time interval between two consecutive packets. It is defined as the time, in microseconds, from the
/// end of the last bit of the previous packet to the start of the first bit of the subsequent packet
TIFS OFFSET(0) NUMBITS(8)
],
/// RSSI sample register
RssiSample [
/// RSSI sample result
RSSISAMPLE OFFSET(0) NUMBITS(7)
],
/// Radio state register
State [
/// Current radio state
STATE OFFSET(0) NUMBITS(4) [
DISABLED = 0,
RXRU = 1,
RXIDLE = 2,
RX = 3,
RXDISABLED = 4,
TXRU = 9,
TXIDLE = 10,
TX = 11,
TXDISABLED = 12
]
],
/// Data whitening initial value register
DataWhiteIv [
/// Data whitening initial value. Bit 6 is hard-wired to '1', writing '0'
/// to it has no effect, and it will always be read back and used by the device as '1'
        DATAWHITEIV OFFSET(0) NUMBITS(7)
],
/// Bit counter compare register
BitCounterCompare [
/// Bit counter compare
BCC OFFSET(0) NUMBITS(32)
],
/// Device address base register
DeviceAddressBase [
/// Device address base 0-7
DAB OFFSET(0) NUMBITS(32)
],
/// Device address prefix register
DeviceAddressPrefix [
/// Device address prefix 0-7
DAP OFFSET(0) NUMBITS(32)
],
/// Device address match configuration register
DeviceAddressMatch [
/// Enable or disable device address matching on 0-7
ENA OFFSET(0) NUMBITS(8),
/// TxAdd for device address 0-7
TXADD OFFSET(8) NUMBITS(8)
],
/// Radio mode configuration register
RadioModeConfig [
/// Radio ramp-up time
RU OFFSET(0) NUMBITS(1) [
DEFAULT = 0,
FAST = 1
],
/// Default TX value
/// Specifies what the RADIO will transmit when it is not started, i.e. between:
/// RADIO.EVENTS_READY and RADIO.TASKS_START
/// RADIO.EVENTS_END and RADIO.TASKS_START
DTX OFFSET(8) NUMBITS(2) [
B1 = 0,
B0 = 1,
CENTER = 2
]
]
];
static mut PAYLOAD: [u8; nrf5x::constants::RADIO_PAYLOAD_LENGTH] =
[0x00; nrf5x::constants::RADIO_PAYLOAD_LENGTH];
pub struct Radio<'a> {
registers: StaticRef<RadioRegisters>,
tx_power: Cell<TxPower>,
rx_client: OptionalCell<&'a dyn ble_advertising::RxClient>,
tx_client: OptionalCell<&'a dyn ble_advertising::TxClient>,
buffer: TakeCell<'static, [u8]>,
}
impl<'a> Radio<'a> {
pub const fn new() -> Radio<'a> {
Radio {
registers: RADIO_BASE,
tx_power: Cell::new(TxPower::ZerodBm),
rx_client: OptionalCell::empty(),
tx_client: OptionalCell::empty(),
buffer: TakeCell::empty(),
}
}
pub fn is_enabled(&self) -> bool {
self.registers.mode.matches_all(Mode::MODE::BLE_1MBIT)
}
fn tx(&self) {
self.registers.event_ready.write(Event::READY::CLEAR);
self.registers.task_txen.write(Task::ENABLE::SET);
}
fn rx(&self) {
self.registers.event_ready.write(Event::READY::CLEAR);
self.registers.task_rxen.write(Task::ENABLE::SET);
}
fn set_rx_address(&self) {
self.registers
.rxaddresses
.write(ReceiveAddresses::ADDRESS.val(1));
}
fn set_tx_address(&self) {
self.registers
.txaddress
.write(TransmitAddress::ADDRESS.val(0));
}
fn radio_on(&self) {
// reset and enable power
self.registers.power.write(Task::ENABLE::CLEAR);
self.registers.power.write(Task::ENABLE::SET);
}
fn radio_off(&self) {
self.registers.power.write(Task::ENABLE::CLEAR);
}
fn set_tx_power(&self) {
self.registers.txpower.set(self.tx_power.get() as u32);
}
fn set_dma_ptr(&self) {
unsafe {
self.registers.packetptr.set(PAYLOAD.as_ptr() as u32);
}
}
#[inline(never)]
pub fn handle_interrupt(&self) {
self.disable_all_interrupts();
if self.registers.event_ready.is_set(Event::READY) {
self.registers.event_ready.write(Event::READY::CLEAR);
self.registers.event_end.write(Event::READY::CLEAR);
self.registers.task_start.write(Task::ENABLE::SET);
}
if self.registers.event_address.is_set(Event::READY) {
self.registers.event_address.write(Event::READY::CLEAR);
}
if self.registers.event_payload.is_set(Event::READY) {
self.registers.event_payload.write(Event::READY::CLEAR);
}
// tx or rx finished!
if self.registers.event_end.is_set(Event::READY) {
self.registers.event_end.write(Event::READY::CLEAR);
let result = if self.registers.crcstatus.is_set(Event::READY) {
ReturnCode::SUCCESS
} else {
ReturnCode::FAIL
};
match self.registers.state.get() {
nrf5x::constants::RADIO_STATE_TXRU
| nrf5x::constants::RADIO_STATE_TXIDLE
| nrf5x::constants::RADIO_STATE_TXDISABLE
| nrf5x::constants::RADIO_STATE_TX => {
self.radio_off();
self.tx_client
.map(|client| client.transmit_event(self.buffer.take().unwrap(), result));
}
nrf5x::constants::RADIO_STATE_RXRU
| nrf5x::constants::RADIO_STATE_RXIDLE
| nrf5x::constants::RADIO_STATE_RXDISABLE
| nrf5x::constants::RADIO_STATE_RX => {
self.radio_off();
unsafe {
self.rx_client.map(|client| {
// Length is: S0 (1 Byte) + Length (1 Byte) + S1 (0 Bytes) + Payload
// And because the length field is directly read from the packet
// We need to add 2 to length to get the total length
client.receive_event(&mut PAYLOAD, PAYLOAD[1] + 2, result)
});
}
}
// Radio state - Disabled
_ => (),
}
}
self.enable_interrupts();
}
pub fn enable_interrupts(&self) {
self.registers.intenset.write(
Interrupt::READY::SET
+ Interrupt::ADDRESS::SET
+ Interrupt::PAYLOAD::SET
+ Interrupt::END::SET,
);
}
pub fn enable_interrupt(&self, intr: u32) {
self.registers.intenset.set(intr);
}
pub fn clear_interrupt(&self, intr: u32) {
self.registers.intenclr.set(intr);
}
pub fn disable_all_interrupts(&self) {
// disable all possible interrupts
self.registers.intenclr.set(0xffffffff);
}
fn replace_radio_buffer(&self, buf: &'static mut [u8]) -> &'static mut [u8] {
// set payload
for (i, c) in buf.as_ref().iter().enumerate() {
unsafe {
PAYLOAD[i] = *c;
}
}
buf
}
fn ble_initialize(&self, channel: RadioChannel) {
self.radio_on();
self.ble_set_tx_power();
self.ble_set_channel_rate();
self.ble_set_channel_freq(channel);
self.ble_set_data_whitening(channel);
self.set_tx_address();
self.set_rx_address();
self.ble_set_packet_config();
self.ble_set_advertising_access_address();
self.ble_set_crc_config();
self.set_dma_ptr();
}
// BLUETOOTH SPECIFICATION Version 4.2 [Vol 6, Part B], section 3.1.1 CRC Generation
fn ble_set_crc_config(&self) {
self.registers
.crccnf
.write(CrcConfiguration::LEN::THREE + CrcConfiguration::SKIPADDR::EXCLUDE);
self.registers
.crcinit
.set(nrf5x::constants::RADIO_CRCINIT_BLE);
self.registers
.crcpoly
.set(nrf5x::constants::RADIO_CRCPOLY_BLE);
}
// BLUETOOTH SPECIFICATION Version 4.2 [Vol 6, Part B], section 2.1.2 Access Address
// Set access address to 0x8E89BED6
fn ble_set_advertising_access_address(&self) {
self.registers.prefix0.set(0x0000008e);
self.registers.base0.set(0x89bed600);
}
// Packet configuration
// BLUETOOTH SPECIFICATION Version 4.2 [Vol 6, Part B], section 2.1 Packet Format
//
// LSB MSB
// +----------+ +----------------+ +---------------+ +------------+
// | Preamble | - | Access Address | - | PDU | - | CRC |
// | (1 byte) | | (4 bytes) | | (2-255 bytes) | | (3 bytes) |
// +----------+ +----------------+ +---------------+ +------------+
//
fn ble_set_packet_config(&self) {
// sets the header of PDU TYPE to 1 byte
// sets the header length to 1 byte
self.registers.pcnf0.write(
PacketConfiguration0::LFLEN.val(8)
+ PacketConfiguration0::S0LEN.val(1)
+ PacketConfiguration0::S1LEN::CLEAR
+ PacketConfiguration0::S1INCL::CLEAR
+ PacketConfiguration0::PLEN::EIGHT,
);
self.registers.pcnf1.write(
PacketConfiguration1::WHITEEN::ENABLED
+ PacketConfiguration1::ENDIAN::LITTLE
+ PacketConfiguration1::BALEN.val(3)
+ PacketConfiguration1::STATLEN::CLEAR
+ PacketConfiguration1::MAXLEN.val(255),
);
}
// BLUETOOTH SPECIFICATION Version 4.2 [Vol 6, Part A], 4.6 REFERENCE SIGNAL DEFINITION
// Bit Rate = 1 Mb/s ±1 ppm
fn ble_set_channel_rate(&self) {
self.registers.mode.write(Mode::MODE::BLE_1MBIT);
}
// BLUETOOTH SPECIFICATION Version 4.2 [Vol 6, Part B], section 3.2 Data Whitening
// Configure channel index to the LFSR and the hardware solves the rest
fn ble_set_data_whitening(&self, channel: RadioChannel) {
self.registers.datawhiteiv.set(channel.get_channel_index());
}
// BLUETOOTH SPECIFICATION Version 4.2 [Vol 6, Part B], section 1.4.1
// RF Channels: 0 - 39
// Data: 0 - 36
// Advertising: 37, 38, 39
fn ble_set_channel_freq(&self, channel: RadioChannel) {
self.registers
.frequency
.write(Frequency::FREQUENCY.val(channel as u32));
}
// BLUETOOTH SPECIFICATION Version 4.2 [Vol 6, Part B], section 3 TRANSMITTER CHARACTERISTICS
// Minimum Output Power : -20dBm
// Maximum Output Power : +10dBm
//
// no check is required because the BleConfig::set_tx_power() method ensures that only
    // valid transmitting power is configured!
fn ble_set_tx_power(&self) {
self.set_tx_power();
}
}
impl<'a> ble_advertising::BleAdvertisementDriver<'a> for Radio<'a> {
fn transmit_advertisement(&self, buf: &'static mut [u8], _len: usize, channel: RadioChannel) {
let res = self.replace_radio_buffer(buf);
self.buffer.replace(res);
self.ble_initialize(channel);
self.tx();
self.enable_interrupts();
}
fn receive_advertisement(&self, channel: RadioChannel) {
self.ble_initialize(channel);
self.rx();
self.enable_interrupts();
}
fn set_receive_client(&self, client: &'a dyn ble_advertising::RxClient) {
self.rx_client.set(client);
}
fn set_transmit_client(&self, client: &'a dyn ble_advertising::TxClient) {
self.tx_client.set(client);
}
}
impl ble_advertising::BleConfig for Radio<'_> {
    // The BLE Advertising Driver validates that the `tx_power` is between -20 and 10 dBm, but the
    // underlying chip must validate whether the given `tx_power` is supported as well
fn set_tx_power(&self, tx_power: u8) -> kernel::ReturnCode {
// Convert u8 to TxPower
match nrf5x::constants::TxPower::try_from(tx_power) {
            // Invalid transmitting power, propagate error
Err(_) => kernel::ReturnCode::ENOSUPPORT,
            // Valid transmitting power, propagate success
Ok(res) => {
self.tx_power.set(res);
kernel::ReturnCode::SUCCESS
}
}
}
}
| 33.774427 | 137 | 0.584164 |
e837b50b57e2b60989e8df2eb50baa0e2d8fa85b | 8,107 | use std::borrow::Cow;
use futures_util::stream::{Stream, StreamExt, TryStreamExt};
use indexmap::map::IndexMap;
use crate::connection::edge::Edge;
use crate::connection::page_info::PageInfo;
use crate::parser::types::Field;
use crate::resolver_utils::{resolve_container, ContainerType};
use crate::types::connection::{CursorType, EmptyFields};
use crate::{
registry, Context, ContextSelectionSet, ObjectType, OutputValueType, Positioned, Result,
ServerResult, Type, Value,
};
/// Connection type
///
/// Connection is the result of a query for `connection::query`.
pub struct Connection<C, T, EC = EmptyFields, EE = EmptyFields> {
/// All edges of the current page.
edges: Vec<Edge<C, T, EE>>,
additional_fields: EC,
has_previous_page: bool,
has_next_page: bool,
}
impl<C, T, EE> Connection<C, T, EmptyFields, EE> {
/// Create a new connection.
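    ///
    /// # Examples
    ///
    /// A minimal sketch (type parameters chosen for illustration):
    ///
    /// ```ignore
    /// use async_graphql::connection::{Connection, Edge};
    ///
    /// let mut connection: Connection<usize, &str> = Connection::new(false, true);
    /// connection.append(vec![Edge::new(0, "a"), Edge::new(1, "b")]);
    /// ```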
pub fn new(has_previous_page: bool, has_next_page: bool) -> Self {
Connection {
additional_fields: EmptyFields,
has_previous_page,
has_next_page,
edges: Vec::new(),
}
}
}
impl<C, T, EC, EE> Connection<C, T, EC, EE> {
/// Create a new connection, it can have some additional fields.
pub fn with_additional_fields(
has_previous_page: bool,
has_next_page: bool,
additional_fields: EC,
) -> Self {
Connection {
additional_fields,
has_previous_page,
has_next_page,
edges: Vec::new(),
}
}
}
impl<C, T, EC, EE> Connection<C, T, EC, EE> {
/// Convert the edge type and return a new `Connection`.
pub fn map<T2, EE2, F>(self, mut f: F) -> Connection<C, T2, EC, EE2>
where
F: FnMut(Edge<C, T, EE>) -> Edge<C, T2, EE2>,
{
let mut new_edges = Vec::with_capacity(self.edges.len());
for edge in self.edges {
new_edges.push(f(edge));
}
Connection {
edges: new_edges,
additional_fields: self.additional_fields,
has_previous_page: self.has_previous_page,
has_next_page: self.has_next_page,
}
}
/// Convert the node type and return a new `Connection`.
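    ///
    /// For example (illustrative sketch):
    ///
    /// ```ignore
    /// // turn a connection over numeric ids into one over display strings
    /// let names = ids_connection.map_node(|id| id.to_string());
    /// ```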
pub fn map_node<T2, F>(self, mut f: F) -> Connection<C, T2, EC, EE>
where
F: FnMut(T) -> T2,
{
self.map(|edge| Edge {
cursor: edge.cursor,
node: f(edge.node),
additional_fields: edge.additional_fields,
})
}
/// Append edges with `IntoIterator<Item = Edge<C, T, EE>>`
pub fn append<I>(&mut self, iter: I)
where
I: IntoIterator<Item = Edge<C, T, EE>>,
{
self.edges.extend(iter);
}
    /// Append edges with `IntoIterator<Item = Result<Edge<C, T, EE>>>`
pub fn try_append<I>(&mut self, iter: I) -> Result<()>
where
I: IntoIterator<Item = Result<Edge<C, T, EE>>>,
{
for edge in iter {
self.edges.push(edge?);
}
Ok(())
}
    /// Append edges with `Stream<Item = Edge<C, T, EE>>`
pub async fn append_stream<S>(&mut self, stream: S)
where
S: Stream<Item = Edge<C, T, EE>> + Unpin,
{
self.edges.extend(stream.collect::<Vec<_>>().await);
}
/// Append edges with `Stream<Item = Result<Edge<C, T, EE>>>`
pub async fn try_append_stream<S>(&mut self, stream: S) -> Result<()>
where
S: Stream<Item = Result<Edge<C, T, EE>>> + Unpin,
{
self.edges.extend(stream.try_collect::<Vec<_>>().await?);
Ok(())
}
}
impl<C, T, EC, EE> Type for Connection<C, T, EC, EE>
where
C: CursorType,
T: OutputValueType + Send + Sync,
EC: ObjectType + Sync + Send,
EE: ObjectType + Sync + Send,
{
fn type_name() -> Cow<'static, str> {
Cow::Owned(format!("{}Connection", T::type_name()))
}
fn create_type_info(registry: &mut registry::Registry) -> String {
registry.create_type::<Self, _>(|registry| {
EC::create_type_info(registry);
let additional_fields = if let Some(registry::MetaType::Object { fields, .. }) =
registry.types.remove(EC::type_name().as_ref())
{
fields
} else {
unreachable!()
};
registry::MetaType::Object {
name: Self::type_name().to_string(),
description: None,
fields: {
let mut fields = IndexMap::new();
fields.insert(
"pageInfo".to_string(),
registry::MetaField {
name: "pageInfo".to_string(),
description: Some("Information to aid in pagination."),
args: Default::default(),
ty: PageInfo::create_type_info(registry),
deprecation: None,
cache_control: Default::default(),
external: false,
requires: None,
provides: None,
},
);
fields.insert(
"edges".to_string(),
registry::MetaField {
name: "edges".to_string(),
description: Some("A list of edges."),
args: Default::default(),
ty: <Option<Vec<Option<Edge<C, T, EE>>>> as Type>::create_type_info(
registry,
),
deprecation: None,
cache_control: Default::default(),
external: false,
requires: None,
provides: None,
},
);
fields.extend(additional_fields);
fields
},
cache_control: Default::default(),
extends: false,
keys: None,
}
})
}
}
#[async_trait::async_trait]
impl<C, T, EC, EE> ContainerType for Connection<C, T, EC, EE>
where
C: CursorType + Send + Sync,
T: OutputValueType + Send + Sync,
EC: ObjectType + Sync + Send,
EE: ObjectType + Sync + Send,
{
async fn resolve_field(&self, ctx: &Context<'_>) -> ServerResult<Option<Value>> {
if ctx.item.node.name.node == "pageInfo" {
let page_info = PageInfo {
has_previous_page: self.has_previous_page,
has_next_page: self.has_next_page,
start_cursor: self.edges.first().map(|edge| edge.cursor.encode_cursor()),
end_cursor: self.edges.last().map(|edge| edge.cursor.encode_cursor()),
};
let ctx_obj = ctx.with_selection_set(&ctx.item.node.selection_set);
return OutputValueType::resolve(&page_info, &ctx_obj, ctx.item)
.await
.map(Some);
} else if ctx.item.node.name.node == "edges" {
let ctx_obj = ctx.with_selection_set(&ctx.item.node.selection_set);
return OutputValueType::resolve(&self.edges, &ctx_obj, ctx.item)
.await
.map(Some);
}
self.additional_fields.resolve_field(ctx).await
}
}
#[async_trait::async_trait]
impl<C, T, EC, EE> OutputValueType for Connection<C, T, EC, EE>
where
C: CursorType + Send + Sync,
T: OutputValueType + Send + Sync,
EC: ObjectType + Sync + Send,
EE: ObjectType + Sync + Send,
{
async fn resolve(
&self,
ctx: &ContextSelectionSet<'_>,
_field: &Positioned<Field>,
) -> ServerResult<Value> {
resolve_container(ctx, self).await
}
}
impl<C, T, EC, EE> ObjectType for Connection<C, T, EC, EE>
where
C: CursorType + Send + Sync,
T: OutputValueType + Send + Sync,
EC: ObjectType + Sync + Send,
EE: ObjectType + Sync + Send,
{
}
| 32.558233 | 96 | 0.524855 |
bb69e93cb128f2b1ac95a8e3ed57efa7d8014b0d | 82,474 | // Copyright © 2015-2017 winapi-rs developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
//! This module defines the 32-Bit Windows Base APIs
use ctypes::{c_char, c_int, c_long, c_void};
use shared::basetsd::{
DWORD64, DWORD_PTR, LONG_PTR, PDWORD64, PDWORD_PTR, PSIZE_T, PULONG_PTR, SIZE_T, UINT_PTR,
ULONG_PTR,
};
use shared::guiddef::GUID;
use shared::minwindef::{
ATOM, BOOL, BYTE, DWORD, FARPROC, FILETIME, HFILE, HGLOBAL, HLOCAL, HMODULE, HRSRC, LPBOOL,
LPBYTE, LPCVOID, LPDWORD, LPFILETIME, LPVOID, LPWORD, PBOOL, PDWORD, PUCHAR, PULONG, PUSHORT,
UCHAR, UINT, ULONG, USHORT, WORD,
};
use shared::windef::HWND;
use um::cfgmgr32::MAX_PROFILE_LEN;
use um::fileapi::STREAM_INFO_LEVELS;
use um::libloaderapi::{
ENUMRESLANGPROCA, ENUMRESLANGPROCW, ENUMRESNAMEPROCA, ENUMRESTYPEPROCA, ENUMRESTYPEPROCW,
};
use um::minwinbase::{
FILE_INFO_BY_HANDLE_CLASS, FINDEX_INFO_LEVELS, FINDEX_SEARCH_OPS, GET_FILEEX_INFO_LEVELS,
LPOVERLAPPED, LPOVERLAPPED_COMPLETION_ROUTINE, LPSECURITY_ATTRIBUTES, PREASON_CONTEXT,
};
use um::processthreadsapi::{LPPROC_THREAD_ATTRIBUTE_LIST, LPSTARTUPINFOA, STARTUPINFOA, STARTUPINFOW};
use um::winnt::{
BOOLEAN, CHAR, DWORDLONG, EXECUTION_STATE, FILE_ID_128, HANDLE, HRESULT, INT, LANGID,
LARGE_INTEGER, LATENCY_TIME, LONG, LPCCH, LPCH, LPCSTR, LPCWSTR, LPOSVERSIONINFOEXA,
LPOSVERSIONINFOEXW, LPSTR, LPWSTR, MAXLONG, PBOOLEAN, PCONTEXT, PCWSTR, PFIRMWARE_TYPE,
PHANDLE, PIO_COUNTERS, PJOB_SET_ARRAY, PLUID, POWER_REQUEST_TYPE, PPERFORMANCE_DATA,
PPROCESSOR_NUMBER, PQUOTA_LIMITS, PRTL_UMS_SCHEDULER_ENTRY_POINT, PSECURE_MEMORY_CACHE_CALLBACK,
PSID, PSID_NAME_USE, PULONGLONG, PVOID, PWOW64_CONTEXT, PWOW64_LDT_ENTRY, PWSTR,
RTL_UMS_THREAD_INFO_CLASS, STATUS_ABANDONED_WAIT_0, STATUS_USER_APC, STATUS_WAIT_0,
THREAD_BASE_PRIORITY_IDLE, THREAD_BASE_PRIORITY_LOWRT, THREAD_BASE_PRIORITY_MAX,
THREAD_BASE_PRIORITY_MIN, ULARGE_INTEGER, VOID, WAITORTIMERCALLBACK, WCHAR, WOW64_CONTEXT,
};
use vc::vadefs::va_list;
pub const FILE_BEGIN: DWORD = 0;
pub const FILE_CURRENT: DWORD = 1;
pub const FILE_END: DWORD = 2;
pub const WAIT_FAILED: DWORD = 0xFFFFFFFF;
pub const WAIT_OBJECT_0: DWORD = STATUS_WAIT_0 as u32;
pub const WAIT_ABANDONED: DWORD = STATUS_ABANDONED_WAIT_0 as u32;
pub const WAIT_ABANDONED_0: DWORD = STATUS_ABANDONED_WAIT_0 as u32;
pub const WAIT_IO_COMPLETION: DWORD = STATUS_USER_APC as u32;
pub const FILE_FLAG_WRITE_THROUGH: DWORD = 0x80000000;
pub const FILE_FLAG_OVERLAPPED: DWORD = 0x40000000;
pub const FILE_FLAG_NO_BUFFERING: DWORD = 0x20000000;
pub const FILE_FLAG_RANDOM_ACCESS: DWORD = 0x10000000;
pub const FILE_FLAG_SEQUENTIAL_SCAN: DWORD = 0x08000000;
pub const FILE_FLAG_DELETE_ON_CLOSE: DWORD = 0x04000000;
pub const FILE_FLAG_BACKUP_SEMANTICS: DWORD = 0x02000000;
pub const FILE_FLAG_POSIX_SEMANTICS: DWORD = 0x01000000;
pub const FILE_FLAG_SESSION_AWARE: DWORD = 0x00800000;
pub const FILE_FLAG_OPEN_REPARSE_POINT: DWORD = 0x00200000;
pub const FILE_FLAG_OPEN_NO_RECALL: DWORD = 0x00100000;
pub const FILE_FLAG_FIRST_PIPE_INSTANCE: DWORD = 0x00080000;
pub const FILE_FLAG_OPEN_REQUIRING_OPLOCK: DWORD = 0x00040000;
pub const PROGRESS_CONTINUE: DWORD = 0;
pub const PROGRESS_CANCEL: DWORD = 1;
pub const PROGRESS_STOP: DWORD = 2;
pub const PROGRESS_QUIET: DWORD = 3;
pub const CALLBACK_CHUNK_FINISHED: DWORD = 0x00000000;
pub const CALLBACK_STREAM_SWITCH: DWORD = 0x00000001;
pub const COPY_FILE_FAIL_IF_EXISTS: DWORD = 0x00000001;
pub const COPY_FILE_RESTARTABLE: DWORD = 0x00000002;
pub const COPY_FILE_OPEN_SOURCE_FOR_WRITE: DWORD = 0x00000004;
pub const COPY_FILE_ALLOW_DECRYPTED_DESTINATION: DWORD = 0x00000008;
pub const COPY_FILE_COPY_SYMLINK: DWORD = 0x00000800;
pub const COPY_FILE_NO_BUFFERING: DWORD = 0x00001000;
pub const COPY_FILE_REQUEST_SECURITY_PRIVILEGES: DWORD = 0x00002000;
pub const COPY_FILE_RESUME_FROM_PAUSE: DWORD = 0x00004000;
pub const COPY_FILE_NO_OFFLOAD: DWORD = 0x00040000;
pub const REPLACEFILE_WRITE_THROUGH: DWORD = 0x00000001;
pub const REPLACEFILE_IGNORE_MERGE_ERRORS: DWORD = 0x00000002;
pub const REPLACEFILE_IGNORE_ACL_ERRORS: DWORD = 0x00000004;
pub const PIPE_ACCESS_INBOUND: DWORD = 0x00000001;
pub const PIPE_ACCESS_OUTBOUND: DWORD = 0x00000002;
pub const PIPE_ACCESS_DUPLEX: DWORD = 0x00000003;
pub const PIPE_CLIENT_END: DWORD = 0x00000000;
pub const PIPE_SERVER_END: DWORD = 0x00000001;
pub const PIPE_WAIT: DWORD = 0x00000000;
pub const PIPE_NOWAIT: DWORD = 0x00000001;
pub const PIPE_READMODE_BYTE: DWORD = 0x00000000;
pub const PIPE_READMODE_MESSAGE: DWORD = 0x00000002;
pub const PIPE_TYPE_BYTE: DWORD = 0x00000000;
pub const PIPE_TYPE_MESSAGE: DWORD = 0x00000004;
pub const PIPE_ACCEPT_REMOTE_CLIENTS: DWORD = 0x00000000;
pub const PIPE_REJECT_REMOTE_CLIENTS: DWORD = 0x00000008;
pub const PIPE_UNLIMITED_INSTANCES: DWORD = 255;
pub const SECURITY_CONTEXT_TRACKING: DWORD = 0x00040000;
pub const SECURITY_EFFECTIVE_ONLY: DWORD = 0x00080000;
pub const SECURITY_SQOS_PRESENT: DWORD = 0x00100000;
pub const SECURITY_VALID_SQOS_FLAGS: DWORD = 0x001F0000;
FN!{stdcall PFIBER_START_ROUTINE(
lpFiberParameter: LPVOID,
) -> ()}
pub type LPFIBER_START_ROUTINE = PFIBER_START_ROUTINE;
FN!{stdcall PFIBER_CALLOUT_ROUTINE(
lpParameter: LPVOID,
) -> LPVOID}
// FAIL_FAST_*
pub type LPLDT_ENTRY = LPVOID; // TODO - fix this for 32-bit
// SP_SERIALCOMM
// PST_*
// PCF_*
// SP_*
// BAUD_*
// DATABITS_*
// STOPBITS_*
// PARITY_*
STRUCT!{struct COMMPROP {
wPacketLength: WORD,
wPacketVersion: WORD,
dwServiceMask: DWORD,
dwReserved1: DWORD,
dwMaxTxQueue: DWORD,
dwMaxRxQueue: DWORD,
dwMaxBaud: DWORD,
dwProvSubType: DWORD,
dwProvCapabilities: DWORD,
dwSettableParams: DWORD,
dwSettableBaud: DWORD,
wSettableData: WORD,
wSettableStopParity: WORD,
dwCurrentTxQueue: DWORD,
dwCurrentRxQueue: DWORD,
dwProvSpec1: DWORD,
dwProvSpec2: DWORD,
wcProvChar: [WCHAR; 1],
}}
pub type LPCOMMPROP = *mut COMMPROP;
STRUCT!{struct COMSTAT {
BitFields: DWORD,
cbInQue: DWORD,
cbOutQue: DWORD,
}}
BITFIELD!{COMSTAT BitFields: DWORD [
fCtsHold set_fCtsHold[0..1],
fDsrHold set_fDsrHold[1..2],
fRlsdHold set_fRlsdHold[2..3],
fXoffHold set_fXoffHold[3..4],
fXoffSent set_fXoffSent[4..5],
fEof set_fEof[5..6],
fTxim set_fTxim[6..7],
fReserved set_fReserved[7..32],
]}
pub type LPCOMSTAT = *mut COMSTAT;
pub const DTR_CONTROL_DISABLE: DWORD = 0x00;
pub const DTR_CONTROL_ENABLE: DWORD = 0x01;
pub const DTR_CONTROL_HANDSHAKE: DWORD = 0x02;
pub const RTS_CONTROL_DISABLE: DWORD = 0x00;
pub const RTS_CONTROL_ENABLE: DWORD = 0x01;
pub const RTS_CONTROL_HANDSHAKE: DWORD = 0x02;
pub const RTS_CONTROL_TOGGLE: DWORD = 0x03;
STRUCT!{struct DCB {
DCBlength: DWORD,
BaudRate: DWORD,
BitFields: DWORD,
wReserved: WORD,
XonLim: WORD,
XoffLim: WORD,
ByteSize: BYTE,
Parity: BYTE,
StopBits: BYTE,
XonChar: c_char,
XoffChar: c_char,
ErrorChar: c_char,
EofChar: c_char,
EvtChar: c_char,
wReserved1: WORD,
}}
BITFIELD!{DCB BitFields: DWORD [
fBinary set_fBinary[0..1],
fParity set_fParity[1..2],
fOutxCtsFlow set_fOutxCtsFlow[2..3],
fOutxDsrFlow set_fOutxDsrFlow[3..4],
fDtrControl set_fDtrControl[4..6],
fDsrSensitivity set_fDsrSensitivity[6..7],
fTXContinueOnXoff set_fTXContinueOnXoff[7..8],
fOutX set_fOutX[8..9],
fInX set_fInX[9..10],
fErrorChar set_fErrorChar[10..11],
fNull set_fNull[11..12],
fRtsControl set_fRtsControl[12..14],
fAbortOnError set_fAbortOnError[14..15],
fDummy2 set_fDummy2[15..32],
]}
pub type LPDCB = *mut DCB;
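// Example (illustrative sketch, not part of winapi-rs): filling in a zeroed
// DCB through the accessors generated by BITFIELD! above. The setters are
// assumed to take the raw DWORD field value.
#[allow(dead_code)]
unsafe fn dcb_example() -> DCB {
    let mut dcb: DCB = core::mem::zeroed();
    dcb.DCBlength = core::mem::size_of::<DCB>() as DWORD; // required by the API
    dcb.BaudRate = CBR_9600;
    dcb.ByteSize = 8;
    dcb.Parity = NOPARITY;
    dcb.StopBits = ONESTOPBIT;
    dcb.set_fBinary(1); // binary mode is mandatory on Win32
    dcb.set_fRtsControl(RTS_CONTROL_HANDSHAKE);
    dcb
}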
STRUCT!{struct COMMTIMEOUTS {
ReadIntervalTimeout: DWORD,
ReadTotalTimeoutMultiplier: DWORD,
ReadTotalTimeoutConstant: DWORD,
WriteTotalTimeoutMultiplier: DWORD,
WriteTotalTimeoutConstant: DWORD,
}}
pub type LPCOMMTIMEOUTS = *mut COMMTIMEOUTS;
STRUCT!{struct COMMCONFIG {
dwSize: DWORD,
wVersion: WORD,
wReserved: WORD,
dcb: DCB,
dwProviderSubType: DWORD,
dwProviderOffset: DWORD,
dwProviderSize: DWORD,
wcProviderData: [WCHAR; 1],
}}
pub type LPCOMMCONFIG = *mut COMMCONFIG;
// GMEM_*
STRUCT!{struct MEMORYSTATUS {
dwLength: DWORD,
dwMemoryLoad: DWORD,
dwTotalPhys: SIZE_T,
dwAvailPhys: SIZE_T,
dwTotalPageFile: SIZE_T,
dwAvailPageFile: SIZE_T,
dwTotalVirtual: SIZE_T,
dwAvailVirtual: SIZE_T,
}}
pub type LPMEMORYSTATUS = *mut MEMORYSTATUS;
// NUMA_NO_PREFERRED_NODE
pub const DEBUG_PROCESS: DWORD = 0x00000001;
pub const DEBUG_ONLY_THIS_PROCESS: DWORD = 0x00000002;
pub const CREATE_SUSPENDED: DWORD = 0x00000004;
pub const DETACHED_PROCESS: DWORD = 0x00000008;
pub const CREATE_NEW_CONSOLE: DWORD = 0x00000010;
pub const NORMAL_PRIORITY_CLASS: DWORD = 0x00000020;
pub const IDLE_PRIORITY_CLASS: DWORD = 0x00000040;
pub const HIGH_PRIORITY_CLASS: DWORD = 0x00000080;
pub const REALTIME_PRIORITY_CLASS: DWORD = 0x00000100;
pub const CREATE_NEW_PROCESS_GROUP: DWORD = 0x00000200;
pub const CREATE_UNICODE_ENVIRONMENT: DWORD = 0x00000400;
pub const CREATE_SEPARATE_WOW_VDM: DWORD = 0x00000800;
pub const CREATE_SHARED_WOW_VDM: DWORD = 0x00001000;
pub const CREATE_FORCEDOS: DWORD = 0x00002000;
pub const BELOW_NORMAL_PRIORITY_CLASS: DWORD = 0x00004000;
pub const ABOVE_NORMAL_PRIORITY_CLASS: DWORD = 0x00008000;
pub const INHERIT_PARENT_AFFINITY: DWORD = 0x00010000;
pub const INHERIT_CALLER_PRIORITY: DWORD = 0x00020000;
pub const CREATE_PROTECTED_PROCESS: DWORD = 0x00040000;
pub const EXTENDED_STARTUPINFO_PRESENT: DWORD = 0x00080000;
pub const PROCESS_MODE_BACKGROUND_BEGIN: DWORD = 0x00100000;
pub const PROCESS_MODE_BACKGROUND_END: DWORD = 0x00200000;
pub const CREATE_BREAKAWAY_FROM_JOB: DWORD = 0x01000000;
pub const CREATE_PRESERVE_CODE_AUTHZ_LEVEL: DWORD = 0x02000000;
pub const CREATE_DEFAULT_ERROR_MODE: DWORD = 0x04000000;
pub const CREATE_NO_WINDOW: DWORD = 0x08000000;
pub const PROFILE_USER: DWORD = 0x10000000;
pub const PROFILE_KERNEL: DWORD = 0x20000000;
pub const PROFILE_SERVER: DWORD = 0x40000000;
pub const CREATE_IGNORE_SYSTEM_DEFAULT: DWORD = 0x80000000;
// STACK_SIZE_PARAM_IS_A_RESERVATION
pub const THREAD_PRIORITY_LOWEST: DWORD = THREAD_BASE_PRIORITY_MIN;
pub const THREAD_PRIORITY_BELOW_NORMAL: DWORD = THREAD_PRIORITY_LOWEST + 1;
pub const THREAD_PRIORITY_NORMAL: DWORD = 0;
pub const THREAD_PRIORITY_HIGHEST: DWORD = THREAD_BASE_PRIORITY_MAX;
pub const THREAD_PRIORITY_ABOVE_NORMAL: DWORD = THREAD_PRIORITY_HIGHEST - 1;
pub const THREAD_PRIORITY_ERROR_RETURN: DWORD = MAXLONG as u32;
pub const THREAD_PRIORITY_TIME_CRITICAL: DWORD = THREAD_BASE_PRIORITY_LOWRT;
pub const THREAD_PRIORITY_IDLE: DWORD = THREAD_BASE_PRIORITY_IDLE;
pub const THREAD_MODE_BACKGROUND_BEGIN: DWORD = 0x00010000;
pub const THREAD_MODE_BACKGROUND_END: DWORD = 0x00020000;
pub const VOLUME_NAME_DOS: DWORD = 0x0;
// VOLUME_NAME_*
// FILE_NAME_*
// JIT_DEBUG_*
pub const DRIVE_UNKNOWN: DWORD = 0;
pub const DRIVE_NO_ROOT_DIR: DWORD = 1;
pub const DRIVE_REMOVABLE: DWORD = 2;
pub const DRIVE_FIXED: DWORD = 3;
pub const DRIVE_REMOTE: DWORD = 4;
pub const DRIVE_CDROM: DWORD = 5;
pub const DRIVE_RAMDISK: DWORD = 6;
// pub fn GetFreeSpace();
pub const FILE_TYPE_UNKNOWN: DWORD = 0x0000;
pub const FILE_TYPE_DISK: DWORD = 0x0001;
pub const FILE_TYPE_CHAR: DWORD = 0x0002;
pub const FILE_TYPE_PIPE: DWORD = 0x0003;
pub const FILE_TYPE_REMOTE: DWORD = 0x8000;
pub const STD_INPUT_HANDLE: DWORD = 0xFFFFFFF6;
pub const STD_OUTPUT_HANDLE: DWORD = 0xFFFFFFF5;
pub const STD_ERROR_HANDLE: DWORD = 0xFFFFFFF4;
pub const NOPARITY: BYTE = 0;
pub const ODDPARITY: BYTE = 1;
pub const EVENPARITY: BYTE = 2;
pub const MARKPARITY: BYTE = 3;
pub const SPACEPARITY: BYTE = 4;
pub const ONESTOPBIT: BYTE = 0;
pub const ONE5STOPBITS: BYTE = 1;
pub const TWOSTOPBITS: BYTE = 2;
pub const IGNORE: DWORD = 0;
pub const INFINITE: DWORD = 0xFFFFFFFF;
pub const CBR_110: DWORD = 110;
pub const CBR_300: DWORD = 300;
pub const CBR_600: DWORD = 600;
pub const CBR_1200: DWORD = 1200;
pub const CBR_2400: DWORD = 2400;
pub const CBR_4800: DWORD = 4800;
pub const CBR_9600: DWORD = 9600;
pub const CBR_14400: DWORD = 14400;
pub const CBR_19200: DWORD = 19200;
pub const CBR_38400: DWORD = 38400;
pub const CBR_56000: DWORD = 56000;
pub const CBR_57600: DWORD = 57600;
pub const CBR_115200: DWORD = 115200;
pub const CBR_128000: DWORD = 128000;
pub const CBR_256000: DWORD = 256000;
// CE_*
// IE_*
// EV_*
pub const SETXOFF: DWORD = 1;
pub const SETXON: DWORD = 2;
pub const SETRTS: DWORD = 3;
pub const CLRRTS: DWORD = 4;
pub const SETDTR: DWORD = 5;
pub const CLRDTR: DWORD = 6;
pub const RESETDEV: DWORD = 7;
pub const SETBREAK: DWORD = 8;
pub const CLRBREAK: DWORD = 9;
pub const PURGE_TXABORT: DWORD = 0x0001;
pub const PURGE_RXABORT: DWORD = 0x0002;
pub const PURGE_TXCLEAR: DWORD = 0x0004;
pub const PURGE_RXCLEAR: DWORD = 0x0008;
pub const MS_CTS_ON: DWORD = 0x0010;
pub const MS_DSR_ON: DWORD = 0x0020;
pub const MS_RING_ON: DWORD = 0x0040;
pub const MS_RLSD_ON: DWORD = 0x0080;
// S_*
// NMPWAIT_*
// FS_*
// OF_*
pub const OFS_MAXPATHNAME: usize = 128;
STRUCT!{struct OFSTRUCT {
cBytes: BYTE,
fFixedDisk: BYTE,
nErrCode: WORD,
Reserved1: WORD,
Reserved2: WORD,
szPathName: [CHAR; OFS_MAXPATHNAME],
}}
pub type POFSTRUCT = *mut OFSTRUCT;
pub type LPOFSTRUCT = *mut OFSTRUCT;
extern "system" {
pub fn GlobalAlloc(
uFlags: UINT,
dwBytes: SIZE_T
) -> HGLOBAL;
pub fn GlobalReAlloc(
hMem: HGLOBAL,
dwBytes: SIZE_T,
uFlags: UINT
) -> HGLOBAL;
pub fn GlobalSize(
hMem: HGLOBAL
) -> SIZE_T;
pub fn GlobalFlags(
hMem: HGLOBAL
) -> UINT;
pub fn GlobalLock(
hMem: HGLOBAL
) -> LPVOID;
pub fn GlobalHandle(
pMem: LPCVOID
) -> HGLOBAL;
pub fn GlobalUnlock(
hMem: HGLOBAL
) -> BOOL;
pub fn GlobalFree(
hMem: HGLOBAL
) -> HGLOBAL;
pub fn GlobalCompact(
dwMinFree: DWORD
) -> SIZE_T;
pub fn GlobalFix(
hMem: HGLOBAL
);
pub fn GlobalUnfix(
hMem: HGLOBAL
);
pub fn GlobalWire(
hMem: HGLOBAL
) -> LPVOID;
pub fn GlobalUnWire(
hMem: HGLOBAL
) -> BOOL;
pub fn GlobalMemoryStatus(
lpBuffer: LPMEMORYSTATUS
);
pub fn LocalAlloc(
uFlags: UINT,
uBytes: SIZE_T
) -> HLOCAL;
pub fn LocalReAlloc(
hMem: HLOCAL,
uBytes: SIZE_T,
uFlags: UINT
) -> HLOCAL;
pub fn LocalLock(
hMem: HLOCAL
) -> LPVOID;
pub fn LocalHandle(
pMem: LPCVOID
) -> HLOCAL;
pub fn LocalUnlock(
hMem: HLOCAL
) -> BOOL;
pub fn LocalSize(
hMem: HLOCAL
) -> SIZE_T;
pub fn LocalFlags(
hMem: HLOCAL,
) -> UINT;
pub fn LocalFree(
hMem: HLOCAL
) -> HLOCAL;
pub fn LocalShrink(
hMem: HLOCAL,
cbNewSize: UINT
) -> SIZE_T;
pub fn LocalCompact(
uMinFree: UINT
) -> SIZE_T;
}
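// Example (illustrative sketch, not part of winapi-rs): querying the legacy
// memory counters. GlobalMemoryStatus has no failure return, but dwLength
// must be initialized before the call.
#[allow(dead_code)]
unsafe fn memory_status_example() -> DWORD {
    let mut status: MEMORYSTATUS = core::mem::zeroed();
    status.dwLength = core::mem::size_of::<MEMORYSTATUS>() as DWORD;
    GlobalMemoryStatus(&mut status);
    status.dwMemoryLoad // approximate percentage of physical memory in use
}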
// SCS_*
extern "system" {
pub fn GetBinaryTypeA(
lpApplicationName: LPCSTR,
lpBinaryType: LPDWORD
) -> BOOL;
pub fn GetBinaryTypeW(
lpApplicationName: LPCWSTR,
lpBinaryType: LPDWORD
) -> BOOL;
pub fn GetShortPathNameA(
lpszLongPath: LPCSTR,
lpszShortPath: LPSTR,
cchBuffer: DWORD,
) -> DWORD;
pub fn GetLongPathNameTransactedA(
lpszShortPath: LPCSTR,
lpszLongPath: LPSTR,
cchBuffer: DWORD,
hTransaction: HANDLE,
) -> DWORD;
pub fn GetLongPathNameTransactedW(
lpszShortPath: LPCWSTR,
lpszLongPath: LPWSTR,
cchBuffer: DWORD,
hTransaction: HANDLE,
) -> DWORD;
pub fn GetProcessAffinityMask(
hProcess: HANDLE,
lpProcessAffinityMask: PDWORD_PTR,
lpSystemAffinityMask: PDWORD_PTR,
) -> BOOL;
pub fn SetProcessAffinityMask(
hProcess: HANDLE,
dwProcessAffinityMask: DWORD
) -> BOOL;
pub fn GetProcessIoCounters(
hProcess: HANDLE,
lpIoCounters: PIO_COUNTERS
) -> BOOL;
pub fn GetProcessWorkingSetSize(
hProcess: HANDLE,
lpMinimumWorkingSetSize: PSIZE_T,
lpMaximumWorkingSetSize: PSIZE_T,
) -> BOOL;
pub fn SetProcessWorkingSetSize(
hProcess: HANDLE,
dwMinimumWorkingSetSize: SIZE_T,
dwMaximumWorkingSetSize: SIZE_T,
) -> BOOL;
pub fn FatalExit(
ExitCode: c_int
);
pub fn SetEnvironmentStringsA(
NewEnvironment: LPCH
) -> BOOL;
pub fn SwitchToFiber(
lpFiber: LPVOID
);
pub fn DeleteFiber(
lpFiber: LPVOID
);
pub fn ConvertFiberToThread() -> BOOL;
pub fn CreateFiberEx(
dwStackCommitSize: SIZE_T,
dwStackReserveSize: SIZE_T,
dwFlags: DWORD,
lpStartAddress: LPFIBER_START_ROUTINE,
lpParameter: LPVOID,
) -> LPVOID;
pub fn ConvertThreadToFiberEx(
lpParameter: LPVOID,
dwFlags: DWORD
) -> LPVOID;
pub fn CreateFiber(
dwStackSize: SIZE_T,
lpStartAddress: LPFIBER_START_ROUTINE,
lpParameter: LPVOID,
) -> LPVOID;
pub fn ConvertThreadToFiber(
lpParameter: LPVOID
) -> LPVOID;
}
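// Example (illustrative sketch, not part of winapi-rs): a minimal fiber
// round-trip. The worker fiber receives the main fiber's handle as its
// parameter and immediately switches back to it.
unsafe extern "system" fn fiber_worker(main_fiber: LPVOID) {
    SwitchToFiber(main_fiber);
}

#[allow(dead_code)]
unsafe fn fiber_example() {
    let main_fiber = ConvertThreadToFiber(core::ptr::null_mut());
    if main_fiber.is_null() {
        return; // already a fiber, or the conversion failed
    }
    let worker = CreateFiber(0, Some(fiber_worker), main_fiber);
    if !worker.is_null() {
        SwitchToFiber(worker); // runs fiber_worker, which switches back here
        DeleteFiber(worker);
    }
}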
pub type PUMS_CONTEXT = *mut c_void;
pub type PUMS_COMPLETION_LIST = *mut c_void;
pub type UMS_THREAD_INFO_CLASS = RTL_UMS_THREAD_INFO_CLASS;
pub type PUMS_THREAD_INFO_CLASS = *mut UMS_THREAD_INFO_CLASS;
pub type PUMS_SCHEDULER_ENTRY_POINT = PRTL_UMS_SCHEDULER_ENTRY_POINT;
STRUCT!{struct UMS_SCHEDULER_STARTUP_INFO {
UmsVersion: ULONG,
CompletionList: PUMS_COMPLETION_LIST,
SchedulerProc: PUMS_SCHEDULER_ENTRY_POINT,
SchedulerParam: PVOID,
}}
pub type PUMS_SCHEDULER_STARTUP_INFO = *mut UMS_SCHEDULER_STARTUP_INFO;
STRUCT!{struct UMS_SYSTEM_THREAD_INFORMATION {
UmsVersion: ULONG,
ThreadUmsFlags: ULONG,
}}
BITFIELD!{UMS_SYSTEM_THREAD_INFORMATION ThreadUmsFlags: ULONG [
IsUmsSchedulerThread set_IsUmsSchedulerThread[0..1],
IsUmsWorkerThread set_IsUmsWorkerThread[1..2],
]}
pub type PUMS_SYSTEM_THREAD_INFORMATION = *mut UMS_SYSTEM_THREAD_INFORMATION;
extern "system" {
#[cfg(target_arch = "x86_64")]
pub fn CreateUmsCompletionList(
UmsCompletionList: *mut PUMS_COMPLETION_LIST
) -> BOOL;
#[cfg(target_arch = "x86_64")]
pub fn DequeueUmsCompletionListItems(
UmsCompletionList: PUMS_COMPLETION_LIST,
WaitTimeOut: DWORD,
UmsThreadList: *mut PUMS_CONTEXT,
) -> BOOL;
#[cfg(target_arch = "x86_64")]
pub fn GetUmsCompletionListEvent(
UmsCompletionList: PUMS_COMPLETION_LIST,
UmsCompletionEvent: PHANDLE,
) -> BOOL;
#[cfg(target_arch = "x86_64")]
pub fn ExecuteUmsThread(
UmsThread: PUMS_CONTEXT
) -> BOOL;
#[cfg(target_arch = "x86_64")]
pub fn UmsThreadYield(
SchedulerParam: PVOID
) -> BOOL;
#[cfg(target_arch = "x86_64")]
pub fn DeleteUmsCompletionList(
UmsCompletionList: PUMS_COMPLETION_LIST
) -> BOOL;
#[cfg(target_arch = "x86_64")]
pub fn GetCurrentUmsThread() -> PUMS_CONTEXT;
#[cfg(target_arch = "x86_64")]
pub fn GetNextUmsListItem(
UmsContext: PUMS_CONTEXT
) -> PUMS_CONTEXT;
#[cfg(target_arch = "x86_64")]
pub fn QueryUmsThreadInformation(
UmsThread: PUMS_CONTEXT,
UmsThreadInfoClass: UMS_THREAD_INFO_CLASS,
UmsThreadInformation: PVOID,
UmsThreadInformationLength: ULONG,
ReturnLength: PULONG,
) -> BOOL;
#[cfg(target_arch = "x86_64")]
pub fn SetUmsThreadInformation(
UmsThread: PUMS_CONTEXT,
UmsThreadInfoClass: UMS_THREAD_INFO_CLASS,
UmsThreadInformation: PVOID,
UmsThreadInformationLength: ULONG,
) -> BOOL;
#[cfg(target_arch = "x86_64")]
pub fn DeleteUmsThreadContext(
UmsThread: PUMS_CONTEXT
) -> BOOL;
#[cfg(target_arch = "x86_64")]
pub fn CreateUmsThreadContext(
lpUmsThread: *mut PUMS_CONTEXT
) -> BOOL;
#[cfg(target_arch = "x86_64")]
pub fn EnterUmsSchedulingMode(
SchedulerStartupInfo: PUMS_SCHEDULER_STARTUP_INFO
) -> BOOL;
#[cfg(target_arch = "x86_64")]
pub fn GetUmsSystemThreadInformation(
ThreadHandle: HANDLE,
SystemThreadInfo: PUMS_SYSTEM_THREAD_INFORMATION,
) -> BOOL;
pub fn SetThreadAffinityMask(
hThread: HANDLE,
dwThreadAffinityMask: DWORD_PTR
) -> DWORD_PTR;
pub fn SetProcessDEPPolicy(
dwFlags: DWORD
) -> BOOL;
pub fn GetProcessDEPPolicy(
hProcess: HANDLE,
lpFlags: LPDWORD,
lpPermanent: PBOOL
) -> BOOL;
pub fn RequestWakeupLatency(
latency: LATENCY_TIME
) -> BOOL;
pub fn IsSystemResumeAutomatic() -> BOOL;
pub fn GetThreadSelectorEntry(
hThread: HANDLE,
dwSelector: DWORD,
lpSelectorEntry: LPLDT_ENTRY,
) -> BOOL;
pub fn SetThreadExecutionState(
esFlags: EXECUTION_STATE
) -> EXECUTION_STATE;
pub fn PowerCreateRequest(
Context: PREASON_CONTEXT
) -> HANDLE;
pub fn PowerSetRequest(
PowerRequest: HANDLE,
RequestType: POWER_REQUEST_TYPE
) -> BOOL;
pub fn PowerClearRequest(
PowerRequest: HANDLE,
RequestType: POWER_REQUEST_TYPE
) -> BOOL;
pub fn RestoreLastError(
dwErrCode: DWORD
);
pub fn SetFileCompletionNotificationModes(
FileHandle: HANDLE,
Flags: UCHAR
) -> BOOL;
}
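// Example (illustrative sketch, not part of winapi-rs): holding the system
// awake across a long operation. The ES_* flags are assumed to come from
// um::winnt; a zero return means the request was rejected.
#[allow(dead_code)]
unsafe fn keep_awake_example() {
    use um::winnt::{ES_CONTINUOUS, ES_SYSTEM_REQUIRED};
    let previous = SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED);
    if previous == 0 {
        return; // request failed; nothing to restore
    }
    // ... long-running work ...
    SetThreadExecutionState(ES_CONTINUOUS); // drop the wakefulness requirement
}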
pub const SEM_FAILCRITICALERRORS: UINT = 0x0001;
pub const SEM_NOGPFAULTERRORBOX: UINT = 0x0002;
pub const SEM_NOALIGNMENTFAULTEXCEPT: UINT = 0x0004;
pub const SEM_NOOPENFILEERRORBOX: UINT = 0x8000;
extern "system" {
pub fn Wow64GetThreadContext(
hThread: HANDLE,
lpContext: PWOW64_CONTEXT
) -> BOOL;
pub fn Wow64SetThreadContext(
hThread: HANDLE,
lpContext: *const WOW64_CONTEXT
) -> BOOL;
pub fn Wow64GetThreadSelectorEntry(
hThread: HANDLE,
dwSelector: DWORD,
lpSelectorEntry: PWOW64_LDT_ENTRY,
) -> BOOL;
pub fn Wow64SuspendThread(
hThread: HANDLE
) -> DWORD;
pub fn DebugSetProcessKillOnExit(
KillOnExit: BOOL
) -> BOOL;
pub fn DebugBreakProcess(
Process: HANDLE
) -> BOOL;
pub fn PulseEvent(
hEvent: HANDLE
) -> BOOL;
pub fn GlobalDeleteAtom(
nAtom: ATOM
) -> ATOM;
pub fn InitAtomTable(
nSize: DWORD
) -> BOOL;
pub fn DeleteAtom(
nAtom: ATOM
) -> ATOM;
pub fn SetHandleCount(
uNumber: UINT
) -> UINT;
pub fn RequestDeviceWakeup(
hDevice: HANDLE
) -> BOOL;
pub fn CancelDeviceWakeupRequest(
hDevice: HANDLE
) -> BOOL;
pub fn GetDevicePowerState(
hDevice: HANDLE,
pfOn: *mut BOOL
) -> BOOL;
pub fn SetMessageWaitingIndicator(
hMsgIndicator: HANDLE,
ulMsgCount: ULONG
) -> BOOL;
pub fn SetFileShortNameA(
hFile: HANDLE,
lpShortName: LPCSTR
) -> BOOL;
pub fn SetFileShortNameW(
hFile: HANDLE,
lpShortName: LPCWSTR
) -> BOOL;
}
pub const HANDLE_FLAG_INHERIT: DWORD = 0x00000001;
pub const HANDLE_FLAG_PROTECT_FROM_CLOSE: DWORD = 0x00000002;
extern "system" {
pub fn LoadModule(
lpModuleName: LPCSTR,
lpParameterBlock: LPVOID
) -> DWORD;
pub fn WinExec(
lpCmdLine: LPCSTR,
uCmdShow: UINT
) -> UINT;
// ClearCommBreak
// ClearCommError
// SetupComm
// EscapeCommFunction
// GetCommConfig
// GetCommMask
// GetCommProperties
// GetCommModemStatus
// GetCommState
// GetCommTimeouts
// PurgeComm
// SetCommBreak
// SetCommConfig
// SetCommMask
// SetCommState
// SetCommTimeouts
// TransmitCommChar
// WaitCommEvent
pub fn SetTapePosition(
hDevice: HANDLE,
dwPositionMethod: DWORD,
dwPartition: DWORD,
dwOffsetLow: DWORD,
dwOffsetHigh: DWORD,
bImmediate: BOOL
) -> DWORD;
pub fn GetTapePosition(
hDevice: HANDLE,
dwPositionType: DWORD,
lpdwPartition: LPDWORD,
lpdwOffsetLow: LPDWORD,
lpdwOffsetHigh: LPDWORD
) -> DWORD;
pub fn PrepareTape(
hDevice: HANDLE,
dwOperation: DWORD,
bImmediate: BOOL
) -> DWORD;
pub fn EraseTape(
hDevice: HANDLE,
dwEraseType: DWORD,
bImmediate: BOOL
) -> DWORD;
pub fn CreateTapePartition(
hDevice: HANDLE,
dwPartitionMethod: DWORD,
dwCount: DWORD,
dwSize: DWORD,
) -> DWORD;
pub fn WriteTapemark(
hDevice: HANDLE,
dwTapemarkType: DWORD,
dwTapemarkCount: DWORD,
bImmediate: BOOL,
) -> DWORD;
pub fn GetTapeStatus(
hDevice: HANDLE
) -> DWORD;
pub fn GetTapeParameters(
hDevice: HANDLE,
dwOperation: DWORD,
lpdwSize: LPDWORD,
lpTapeInformation: LPVOID
) -> DWORD;
pub fn SetTapeParameters(
hDevice: HANDLE,
dwOperation: DWORD,
lpTapeInformation: LPVOID,
) -> DWORD;
pub fn MulDiv(
nNumber: c_int,
nNumerator: c_int,
nDenominator: c_int
) -> c_int;
}
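// Example (illustrative sketch, not part of winapi-rs): MulDiv evaluates
// (nNumber * nNumerator) / nDenominator in 64-bit intermediate precision and
// rounds to the nearest integer, so scaling 150 by 2/3 yields exactly 100.
#[allow(dead_code)]
unsafe fn muldiv_example() -> c_int {
    MulDiv(150, 2, 3) // returns 100; -1 signals overflow or division by zero
}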
ENUM!{enum DEP_SYSTEM_POLICY_TYPE {
DEPPolicyAlwaysOff = 0,
DEPPolicyAlwaysOn,
DEPPolicyOptIn,
DEPPolicyOptOut,
DEPTotalPolicyCount,
}}
extern "system" {
pub fn GetSystemDEPPolicy() -> DEP_SYSTEM_POLICY_TYPE;
pub fn GetSystemRegistryQuota(
pdwQuotaAllowed: PDWORD,
pdwQuotaUsed: PDWORD
) -> BOOL;
pub fn FileTimeToDosDateTime(
lpFileTime: *const FILETIME,
lpFatDate: LPWORD,
lpFatTime: LPWORD,
) -> BOOL;
pub fn DosDateTimeToFileTime(
wFatDate: WORD,
wFatTime: WORD,
lpFileTime: LPFILETIME
) -> BOOL;
pub fn FormatMessageA(
dwFlags: DWORD,
lpSource: LPCVOID,
dwMessageId: DWORD,
dwLanguageId: DWORD,
lpBuffer: LPSTR,
nSize: DWORD,
Arguments: *mut va_list,
) -> DWORD;
pub fn FormatMessageW(
dwFlags: DWORD,
lpSource: LPCVOID,
dwMessageId: DWORD,
dwLanguageId: DWORD,
lpBuffer: LPWSTR,
nSize: DWORD,
Arguments: *mut va_list,
) -> DWORD;
}
pub const FORMAT_MESSAGE_IGNORE_INSERTS: DWORD = 0x00000200;
pub const FORMAT_MESSAGE_FROM_STRING: DWORD = 0x00000400;
pub const FORMAT_MESSAGE_FROM_HMODULE: DWORD = 0x00000800;
pub const FORMAT_MESSAGE_FROM_SYSTEM: DWORD = 0x00001000;
pub const FORMAT_MESSAGE_ARGUMENT_ARRAY: DWORD = 0x00002000;
pub const FORMAT_MESSAGE_MAX_WIDTH_MASK: DWORD = 0x000000FF;
pub const FORMAT_MESSAGE_ALLOCATE_BUFFER: DWORD = 0x00000100;
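// Example (illustrative sketch, not part of winapi-rs): rendering a system
// error code into a caller-supplied UTF-16 buffer. The return value is the
// number of WCHARs written, or zero on failure.
#[allow(dead_code)]
unsafe fn format_error_example(code: DWORD, buf: &mut [u16]) -> DWORD {
    FormatMessageW(
        FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
        core::ptr::null(),     // no source module: consult the system tables
        code,
        0,                     // language 0: use the default lookup order
        buf.as_mut_ptr(),
        buf.len() as DWORD,
        core::ptr::null_mut(), // no insert arguments
    )
}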
extern "system" {
pub fn CreateMailslotA(
lpName: LPCSTR,
nMaxMessageSize: DWORD,
lReadTimeout: DWORD,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
) -> HANDLE;
pub fn CreateMailslotW(
lpName: LPCWSTR,
nMaxMessageSize: DWORD,
lReadTimeout: DWORD,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
) -> HANDLE;
pub fn GetMailslotInfo(
hMailslot: HANDLE,
lpMaxMessageSize: LPDWORD,
lpNextSize: LPDWORD,
lpMessageCount: LPDWORD,
lpReadTimeout: LPDWORD,
) -> BOOL;
pub fn SetMailslotInfo(
hMailslot: HANDLE,
lReadTimeout: DWORD
) -> BOOL;
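    // Example (illustrative, comment-only so this extern block stays valid):
    // creating a mailslot server endpoint that waits indefinitely for messages.
    //
    //     let h = CreateMailslotW(name.as_ptr(), 0, MAILSLOT_WAIT_FOREVER,
    //                             core::ptr::null_mut());
    //
    // MAILSLOT_WAIT_FOREVER (0xFFFFFFFF) is assumed from this module's
    // constants elsewhere; a zero nMaxMessageSize means "no size limit".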
// pub fn EncryptFileA();
// pub fn EncryptFileW();
// pub fn DecryptFileA();
// pub fn DecryptFileW();
// pub fn FileEncryptionStatusA();
// pub fn FileEncryptionStatusW();
// pub fn OpenEncryptedFileRawA();
// pub fn OpenEncryptedFileRawW();
// pub fn ReadEncryptedFileRaw();
// pub fn WriteEncryptedFileRaw();
// pub fn CloseEncryptedFileRaw();
pub fn lstrcmpA(
lpString1: LPCSTR,
lpString2: LPCSTR
) -> c_int;
pub fn lstrcmpW(
lpString1: LPCWSTR,
lpString2: LPCWSTR
) -> c_int;
pub fn lstrcmpiA(
lpString1: LPCSTR,
lpString2: LPCSTR
) -> c_int;
pub fn lstrcmpiW(
lpString1: LPCWSTR,
lpString2: LPCWSTR
) -> c_int;
pub fn lstrcpynA(
lpString1: LPSTR,
lpString2: LPCSTR,
iMaxLength: c_int
) -> LPSTR;
pub fn lstrcpynW(
lpString1: LPWSTR,
lpString2: LPCWSTR,
iMaxLength: c_int
) -> LPWSTR;
pub fn lstrcpyA(
lpString1: LPSTR,
lpString2: LPCSTR
) -> LPSTR;
pub fn lstrcpyW(
lpString1: LPWSTR,
lpString2: LPCWSTR
) -> LPWSTR;
pub fn lstrcatA(
lpString1: LPSTR,
lpString2: LPCSTR
) -> LPSTR;
pub fn lstrcatW(
lpString1: LPWSTR,
lpString2: LPCWSTR
) -> LPWSTR;
pub fn lstrlenA(
lpString: LPCSTR
) -> c_int;
pub fn lstrlenW(
lpString: LPCWSTR
) -> c_int;
pub fn OpenFile(
lpFileName: LPCSTR,
lpReOpenBuff: LPOFSTRUCT,
uStyle: UINT
) -> HFILE;
pub fn _lopen(
lpPathName: LPCSTR,
iReadWrite: c_int
) -> HFILE;
pub fn _lcreat(
lpPathName: LPCSTR,
        iAttribute: c_int
) -> HFILE;
pub fn _lread(
hFile: HFILE,
lpBuffer: LPVOID,
uBytes: UINT
) -> UINT;
pub fn _lwrite(
hFile: HFILE,
lpBuffer: LPCCH,
uBytes: UINT
) -> UINT;
pub fn _hread(
hFile: HFILE,
lpBuffer: LPVOID,
lBytes: c_long
) -> c_long;
pub fn _hwrite(
hFile: HFILE,
lpBuffer: LPCCH,
lBytes: c_long
) -> c_long;
pub fn _lclose(
hFile: HFILE
) -> HFILE;
pub fn _llseek(
hFile: HFILE,
lOffset: LONG,
iOrigin: c_int
) -> LONG;
// pub fn IsTextUnicode();
// pub fn SignalObjectAndWait();
pub fn BackupRead(
hFile: HANDLE,
lpBuffer: LPBYTE,
nNumberOfBytesToRead: DWORD,
lpNumberOfBytesRead: LPDWORD,
bAbort: BOOL,
bProcessSecurity: BOOL,
lpContext: *mut LPVOID,
) -> BOOL;
pub fn BackupSeek(
hFile: HANDLE,
dwLowBytesToSeek: DWORD,
dwHighBytesToSeek: DWORD,
lpdwLowByteSeeked: LPDWORD,
lpdwHighByteSeeked: LPDWORD,
lpContext: *mut LPVOID,
) -> BOOL;
pub fn BackupWrite(
hFile: HANDLE,
lpBuffer: LPBYTE,
nNumberOfBytesToWrite: DWORD,
lpNumberOfBytesWritten: LPDWORD,
bAbort: BOOL,
bProcessSecurity: BOOL,
lpContext: *mut LPVOID,
) -> BOOL;
}
//2886
pub const STARTF_USESHOWWINDOW: DWORD = 0x00000001;
pub const STARTF_USESIZE: DWORD = 0x00000002;
pub const STARTF_USEPOSITION: DWORD = 0x00000004;
pub const STARTF_USECOUNTCHARS: DWORD = 0x00000008;
pub const STARTF_USEFILLATTRIBUTE: DWORD = 0x00000010;
pub const STARTF_RUNFULLSCREEN: DWORD = 0x00000020;
pub const STARTF_FORCEONFEEDBACK: DWORD = 0x00000040;
pub const STARTF_FORCEOFFFEEDBACK: DWORD = 0x00000080;
pub const STARTF_USESTDHANDLES: DWORD = 0x00000100;
pub const STARTF_USEHOTKEY: DWORD = 0x00000200;
pub const STARTF_TITLEISLINKNAME: DWORD = 0x00000800;
pub const STARTF_TITLEISAPPID: DWORD = 0x00001000;
pub const STARTF_PREVENTPINNING: DWORD = 0x00002000;
pub const STARTF_UNTRUSTEDSOURCE: DWORD = 0x00008000;
STRUCT!{struct STARTUPINFOEXA {
StartupInfo: STARTUPINFOA,
lpAttributeList: LPPROC_THREAD_ATTRIBUTE_LIST,
}}
pub type LPSTARTUPINFOEXA = *mut STARTUPINFOEXA;
STRUCT!{struct STARTUPINFOEXW {
StartupInfo: STARTUPINFOW,
lpAttributeList: LPPROC_THREAD_ATTRIBUTE_LIST,
}}
pub type LPSTARTUPINFOEXW = *mut STARTUPINFOEXW;
extern "system" {
pub fn OpenMutexA(
dwDesiredAccess: DWORD,
bInheritHandle: BOOL,
lpName: LPCSTR
) -> HANDLE;
pub fn CreateSemaphoreA(
lpSemaphoreAttributes: LPSECURITY_ATTRIBUTES,
lInitialCount: LONG,
lMaximumCount: LONG,
lpName: LPCSTR,
) -> HANDLE;
pub fn OpenSemaphoreA(
dwDesiredAccess: DWORD,
bInheritHandle: BOOL,
lpName: LPCSTR
) -> HANDLE;
pub fn CreateWaitableTimerA(
lpTimerAttributes: LPSECURITY_ATTRIBUTES,
bManualReset: BOOL,
lpTimerName: LPCSTR,
) -> HANDLE;
pub fn OpenWaitableTimerA(
dwDesiredAccess: DWORD,
bInheritHandle: BOOL,
lpTimerName: LPCSTR,
) -> HANDLE;
pub fn CreateSemaphoreExA(
lpSemaphoreAttributes: LPSECURITY_ATTRIBUTES,
lInitialCount: LONG,
lMaximumCount: LONG,
lpName: LPCSTR,
dwFlags: DWORD,
dwDesiredAccess: DWORD,
) -> HANDLE;
pub fn CreateWaitableTimerExA(
lpTimerAttributes: LPSECURITY_ATTRIBUTES,
lpTimerName: LPCSTR,
dwFlags: DWORD,
dwDesiredAccess: DWORD,
) -> HANDLE;
pub fn CreateFileMappingA(
hFile: HANDLE,
lpAttributes: LPSECURITY_ATTRIBUTES,
flProtect: DWORD,
dwMaximumSizeHigh: DWORD,
dwMaximumSizeLow: DWORD,
lpName: LPCSTR,
) -> HANDLE;
pub fn CreateFileMappingNumaA(
hFile: HANDLE,
lpFileMappingAttributes: LPSECURITY_ATTRIBUTES,
flProtect: DWORD,
dwMaximumSizeHigh: DWORD,
dwMaximumSizeLow: DWORD,
lpName: LPCSTR,
nndPreferred: DWORD,
) -> HANDLE;
pub fn OpenFileMappingA(
dwDesiredAccess: DWORD,
bInheritHandle: BOOL,
lpName: LPCSTR,
) -> HANDLE;
pub fn GetLogicalDriveStringsA(
nBufferLength: DWORD,
lpBuffer: LPSTR
) -> DWORD;
pub fn LoadPackagedLibrary(
lpwLibFileName: LPCWSTR,
Reserved: DWORD
) -> HMODULE;
pub fn QueryFullProcessImageNameA(
hProcess: HANDLE,
dwFlags: DWORD,
lpExeName: LPSTR,
lpdwSize: PDWORD,
) -> BOOL;
pub fn QueryFullProcessImageNameW(
hProcess: HANDLE,
dwFlags: DWORD,
lpExeName: LPWSTR,
lpdwSize: PDWORD,
) -> BOOL;
}
//3233
extern "system" {
pub fn GetStartupInfoA(
lpStartupInfo: LPSTARTUPINFOA
);
pub fn GetFirmwareEnvironmentVariableA(
lpName: LPCSTR,
lpGuid: LPCSTR,
pBuffer: PVOID,
nSize: DWORD,
) -> DWORD;
pub fn GetFirmwareEnvironmentVariableW(
lpName: LPCWSTR,
lpGuid: LPCWSTR,
pBuffer: PVOID,
nSize: DWORD,
) -> DWORD;
pub fn GetFirmwareEnvironmentVariableExA(
lpName: LPCSTR,
lpGuid: LPCSTR,
pBuffer: PVOID,
nSize: DWORD,
        pdwAttributes: PDWORD,
) -> DWORD;
pub fn GetFirmwareEnvironmentVariableExW(
lpName: LPCWSTR,
lpGuid: LPCWSTR,
pBuffer: PVOID,
nSize: DWORD,
        pdwAttributes: PDWORD,
) -> DWORD;
pub fn SetFirmwareEnvironmentVariableA(
lpName: LPCSTR,
lpGuid: LPCSTR,
pValue: PVOID,
nSize: DWORD,
) -> BOOL;
pub fn SetFirmwareEnvironmentVariableW(
lpName: LPCWSTR,
lpGuid: LPCWSTR,
pValue: PVOID,
nSize: DWORD,
) -> BOOL;
pub fn SetFirmwareEnvironmentVariableExA(
lpName: LPCSTR,
lpGuid: LPCSTR,
pValue: PVOID,
nSize: DWORD,
dwAttributes: DWORD,
) -> BOOL;
pub fn SetFirmwareEnvironmentVariableExW(
lpName: LPCWSTR,
lpGuid: LPCWSTR,
pValue: PVOID,
nSize: DWORD,
dwAttributes: DWORD,
) -> BOOL;
pub fn GetFirmwareType(
FirmwareType: PFIRMWARE_TYPE
) -> BOOL;
pub fn IsNativeVhdBoot(
NativeVhdBoot: PBOOL
) -> BOOL;
pub fn FindResourceA(
hModule: HMODULE,
lpName: LPCSTR,
lpType: LPCSTR
) -> HRSRC;
pub fn FindResourceExA(
hModule: HMODULE,
lpName: LPCSTR,
lpType: LPCSTR,
wLanguage: WORD,
) -> HRSRC;
pub fn EnumResourceTypesA(
hModule: HMODULE,
lpEnumFunc: ENUMRESTYPEPROCA,
lParam: LONG_PTR,
) -> BOOL;
pub fn EnumResourceTypesW(
hModule: HMODULE,
lpEnumFunc: ENUMRESTYPEPROCW,
lParam: LONG_PTR,
) -> BOOL;
pub fn EnumResourceNamesA(
hModule: HMODULE,
lpType: LPCSTR,
lpEnumFunc: ENUMRESNAMEPROCA,
lParam: LONG_PTR,
) -> BOOL;
pub fn EnumResourceLanguagesA(
hModule: HMODULE,
lpType: LPCSTR,
lpName: LPCSTR,
lpEnumFunc: ENUMRESLANGPROCA,
lParam: LONG_PTR,
) -> BOOL;
pub fn EnumResourceLanguagesW(
hModule: HMODULE,
lpType: LPCWSTR,
lpName: LPCWSTR,
lpEnumFunc: ENUMRESLANGPROCW,
lParam: LONG_PTR,
) -> BOOL;
pub fn BeginUpdateResourceA(
pFileName: LPCSTR,
bDeleteExistingResources: BOOL
) -> HANDLE;
pub fn BeginUpdateResourceW(
pFileName: LPCWSTR,
bDeleteExistingResources: BOOL
) -> HANDLE;
pub fn UpdateResourceA(
hUpdate: HANDLE,
lpType: LPCSTR,
lpName: LPCSTR,
wLanguage: WORD,
lpData: LPVOID,
cb: DWORD,
) -> BOOL;
pub fn UpdateResourceW(
hUpdate: HANDLE,
lpType: LPCWSTR,
lpName: LPCWSTR,
wLanguage: WORD,
lpData: LPVOID,
cb: DWORD,
) -> BOOL;
pub fn EndUpdateResourceA(
hUpdate: HANDLE,
fDiscard: BOOL
) -> BOOL;
pub fn EndUpdateResourceW(
hUpdate: HANDLE,
fDiscard: BOOL
) -> BOOL;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn GlobalAddAtomA(
lpString: LPCSTR
) -> ATOM;
pub fn GlobalAddAtomW(
lpString: LPCWSTR
) -> ATOM;
pub fn GlobalAddAtomExA(
lpString: LPCSTR,
Flags: DWORD
) -> ATOM;
pub fn GlobalAddAtomExW(
lpString: LPCWSTR,
Flags: DWORD
) -> ATOM;
pub fn GlobalFindAtomA(
lpString: LPCSTR
) -> ATOM;
pub fn GlobalFindAtomW(
lpString: LPCWSTR
) -> ATOM;
pub fn GlobalGetAtomNameA(
nAtom: ATOM,
lpBuffer: LPSTR,
nSize: c_int
) -> UINT;
pub fn GlobalGetAtomNameW(
nAtom: ATOM,
lpBuffer: LPWSTR,
nSize: c_int
) -> UINT;
pub fn AddAtomA(
lpString: LPCSTR
) -> ATOM;
pub fn AddAtomW(
lpString: LPCWSTR
) -> ATOM;
pub fn FindAtomA(
lpString: LPCSTR
) -> ATOM;
pub fn FindAtomW(
lpString: LPCWSTR
) -> ATOM;
pub fn GetAtomNameA(
nAtom: ATOM,
lpBuffer: LPSTR,
nSize: c_int
) -> UINT;
pub fn GetAtomNameW(
nAtom: ATOM,
lpBuffer: LPWSTR,
nSize: c_int
) -> UINT;
pub fn GetProfileIntA(
lpAppName: LPCSTR,
lpKeyName: LPCSTR,
nDefault: INT
) -> UINT;
pub fn GetProfileIntW(
lpAppName: LPCWSTR,
lpKeyName: LPCWSTR,
nDefault: INT
) -> UINT;
pub fn GetProfileStringA(
lpAppName: LPCSTR,
lpKeyName: LPCSTR,
lpDefault: LPCSTR,
lpReturnedString: LPSTR,
nSize: DWORD,
) -> DWORD;
pub fn GetProfileStringW(
lpAppName: LPCWSTR,
lpKeyName: LPCWSTR,
lpDefault: LPCWSTR,
lpReturnedString: LPWSTR,
nSize: DWORD,
) -> DWORD;
pub fn WriteProfileStringA(
lpAppName: LPCSTR,
lpKeyName: LPCSTR,
lpString: LPCSTR
) -> BOOL;
pub fn WriteProfileStringW(
lpAppName: LPCWSTR,
lpKeyName: LPCWSTR,
lpString: LPCWSTR
) -> BOOL;
pub fn GetProfileSectionA(
lpAppName: LPCSTR,
lpReturnedString: LPSTR,
nSize: DWORD
) -> DWORD;
pub fn GetProfileSectionW(
lpAppName: LPCWSTR,
lpReturnedString: LPWSTR,
nSize: DWORD
) -> DWORD;
pub fn WriteProfileSectionA(
lpAppName: LPCSTR,
lpString: LPCSTR
) -> BOOL;
pub fn WriteProfileSectionW(
lpAppName: LPCWSTR,
lpString: LPCWSTR
) -> BOOL;
pub fn GetPrivateProfileIntA(
lpAppName: LPCSTR,
lpKeyName: LPCSTR,
nDefault: INT,
lpFileName: LPCSTR,
) -> UINT;
pub fn GetPrivateProfileIntW(
lpAppName: LPCWSTR,
lpKeyName: LPCWSTR,
nDefault: INT,
lpFileName: LPCWSTR,
) -> UINT;
pub fn GetPrivateProfileStringA(
lpAppName: LPCSTR,
lpKeyName: LPCSTR,
lpDefault: LPCSTR,
lpReturnedString: LPSTR,
nSize: DWORD,
lpFileName: LPCSTR,
) -> DWORD;
pub fn GetPrivateProfileStringW(
lpAppName: LPCWSTR,
lpKeyName: LPCWSTR,
lpDefault: LPCWSTR,
lpReturnedString: LPWSTR,
nSize: DWORD,
lpFileName: LPCWSTR,
) -> DWORD;
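    // Example (illustrative, comment-only so this extern block stays valid):
    // reading one key from an INI file with a fallback default.
    //
    //     let mut buf = [0u16; 256];
    //     let len = GetPrivateProfileStringW(
    //         section.as_ptr(), key.as_ptr(), default.as_ptr(),
    //         buf.as_mut_ptr(), buf.len() as DWORD, path.as_ptr());
    //
    // `section`, `key`, `default`, and `path` are assumed to be
    // NUL-terminated UTF-16 buffers prepared by the caller.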
pub fn WritePrivateProfileStringA(
lpAppName: LPCSTR,
lpKeyName: LPCSTR,
lpString: LPCSTR,
lpFileName: LPCSTR,
) -> BOOL;
pub fn WritePrivateProfileStringW(
lpAppName: LPCWSTR,
lpKeyName: LPCWSTR,
lpString: LPCWSTR,
lpFileName: LPCWSTR,
) -> BOOL;
pub fn GetPrivateProfileSectionA(
lpAppName: LPCSTR,
lpReturnedString: LPSTR,
nSize: DWORD,
lpFileName: LPCSTR,
) -> DWORD;
pub fn GetPrivateProfileSectionW(
lpAppName: LPCWSTR,
lpReturnedString: LPWSTR,
nSize: DWORD,
lpFileName: LPCWSTR,
) -> DWORD;
pub fn WritePrivateProfileSectionA(
lpAppName: LPCSTR,
lpString: LPCSTR,
lpFileName: LPCSTR,
) -> BOOL;
pub fn WritePrivateProfileSectionW(
lpAppName: LPCWSTR,
lpString: LPCWSTR,
lpFileName: LPCWSTR,
) -> BOOL;
pub fn GetPrivateProfileSectionNamesA(
lpszReturnBuffer: LPSTR,
nSize: DWORD,
lpFileName: LPCSTR,
) -> DWORD;
pub fn GetPrivateProfileSectionNamesW(
lpszReturnBuffer: LPWSTR,
nSize: DWORD,
lpFileName: LPCWSTR,
) -> DWORD;
pub fn GetPrivateProfileStructA(
lpszSection: LPCSTR,
lpszKey: LPCSTR,
lpStruct: LPVOID,
uSizeStruct: UINT,
szFile: LPCSTR,
) -> BOOL;
pub fn GetPrivateProfileStructW(
lpszSection: LPCWSTR,
lpszKey: LPCWSTR,
lpStruct: LPVOID,
uSizeStruct: UINT,
szFile: LPCWSTR,
) -> BOOL;
pub fn WritePrivateProfileStructA(
lpszSection: LPCSTR,
lpszKey: LPCSTR,
lpStruct: LPVOID,
uSizeStruct: UINT,
szFile: LPCSTR,
) -> BOOL;
pub fn WritePrivateProfileStructW(
lpszSection: LPCWSTR,
lpszKey: LPCWSTR,
lpStruct: LPVOID,
uSizeStruct: UINT,
szFile: LPCWSTR,
) -> BOOL;
pub fn Wow64EnableWow64FsRedirection(
Wow64FsEnableRedirection: BOOLEAN
) -> BOOLEAN;
pub fn SetDllDirectoryA(
lpPathName: LPCSTR
) -> BOOL;
pub fn SetDllDirectoryW(
lpPathName: LPCWSTR
) -> BOOL;
pub fn GetDllDirectoryA(
nBufferLength: DWORD,
lpBuffer: LPSTR
) -> DWORD;
pub fn GetDllDirectoryW(
nBufferLength: DWORD,
lpBuffer: LPWSTR
) -> DWORD;
pub fn SetSearchPathMode(
Flags: DWORD
) -> BOOL;
pub fn CreateDirectoryExA(
lpTemplateDirectory: LPCSTR,
lpNewDirectory: LPCSTR,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
) -> BOOL;
pub fn CreateDirectoryExW(
lpTemplateDirectory: LPCWSTR,
lpNewDirectory: LPCWSTR,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
) -> BOOL;
pub fn CreateDirectoryTransactedA(
lpTemplateDirectory: LPCSTR,
lpNewDirectory: LPCSTR,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
hTransaction: HANDLE,
) -> BOOL;
pub fn CreateDirectoryTransactedW(
lpTemplateDirectory: LPCWSTR,
lpNewDirectory: LPCWSTR,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
hTransaction: HANDLE,
) -> BOOL;
pub fn RemoveDirectoryTransactedA(
lpPathName: LPCSTR,
hTransaction: HANDLE
) -> BOOL;
pub fn RemoveDirectoryTransactedW(
lpPathName: LPCWSTR,
hTransaction: HANDLE
) -> BOOL;
pub fn GetFullPathNameTransactedA(
lpFileName: LPCSTR,
nBufferLength: DWORD,
lpBuffer: LPSTR,
lpFilePart: *mut LPSTR,
hTransaction: HANDLE,
) -> DWORD;
pub fn GetFullPathNameTransactedW(
lpFileName: LPCWSTR,
nBufferLength: DWORD,
lpBuffer: LPWSTR,
lpFilePart: *mut LPWSTR,
hTransaction: HANDLE,
    ) -> DWORD;
pub fn DefineDosDeviceA(
dwFlags: DWORD,
lpDeviceName: LPCSTR,
lpTargetPath: LPCSTR
) -> BOOL;
pub fn QueryDosDeviceA(
lpDeviceName: LPCSTR,
lpTargetPath: LPSTR,
ucchMax: DWORD
) -> DWORD;
pub fn CreateFileTransactedA(
lpFileName: LPCSTR,
dwDesiredAccess: DWORD,
dwShareMode: DWORD,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
dwCreationDisposition: DWORD,
dwFlagsAndAttributes: DWORD,
hTemplateFile: HANDLE,
hTransaction: HANDLE,
pusMiniVersion: PUSHORT,
lpExtendedParameter: PVOID,
) -> HANDLE;
pub fn CreateFileTransactedW(
lpFileName: LPCWSTR,
dwDesiredAccess: DWORD,
dwShareMode: DWORD,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
dwCreationDisposition: DWORD,
dwFlagsAndAttributes: DWORD,
hTemplateFile: HANDLE,
hTransaction: HANDLE,
pusMiniVersion: PUSHORT,
lpExtendedParameter: PVOID,
) -> HANDLE;
pub fn ReOpenFile(
hOriginalFile: HANDLE,
dwDesiredAccess: DWORD,
dwShareMode: DWORD,
dwFlags: DWORD,
) -> HANDLE;
pub fn SetFileAttributesTransactedA(
lpFileName: LPCSTR,
dwFileAttributes: DWORD,
hTransaction: HANDLE,
) -> BOOL;
pub fn SetFileAttributesTransactedW(
lpFileName: LPCWSTR,
dwFileAttributes: DWORD,
hTransaction: HANDLE,
) -> BOOL;
pub fn GetFileAttributesTransactedA(
lpFileName: LPCSTR,
fInfoLevelId: GET_FILEEX_INFO_LEVELS,
lpFileInformation: LPVOID,
hTransaction: HANDLE,
) -> BOOL;
pub fn GetFileAttributesTransactedW(
lpFileName: LPCWSTR,
fInfoLevelId: GET_FILEEX_INFO_LEVELS,
lpFileInformation: LPVOID,
hTransaction: HANDLE,
) -> BOOL;
pub fn GetCompressedFileSizeTransactedA(
lpFileName: LPCSTR,
lpFileSizeHigh: LPDWORD,
hTransaction: HANDLE,
) -> DWORD;
pub fn GetCompressedFileSizeTransactedW(
lpFileName: LPCWSTR,
lpFileSizeHigh: LPDWORD,
hTransaction: HANDLE,
    ) -> DWORD;
pub fn DeleteFileTransactedA(
lpFileName: LPCSTR,
hTransaction: HANDLE
) -> BOOL;
pub fn DeleteFileTransactedW(
lpFileName: LPCWSTR,
hTransaction: HANDLE
) -> BOOL;
pub fn CheckNameLegalDOS8Dot3A(
lpName: LPCSTR,
lpOemName: LPSTR,
OemNameSize: DWORD,
pbNameContainsSpaces: PBOOL,
pbNameLegal: PBOOL,
) -> BOOL;
pub fn CheckNameLegalDOS8Dot3W(
lpName: LPCWSTR,
lpOemName: LPSTR,
OemNameSize: DWORD,
pbNameContainsSpaces: PBOOL,
pbNameLegal: PBOOL,
) -> BOOL;
pub fn FindFirstFileTransactedA(
lpFileName: LPCSTR,
fInfoLevelId: FINDEX_INFO_LEVELS,
lpFindFileData: LPVOID,
fSearchOp: FINDEX_SEARCH_OPS,
lpSearchFilter: LPVOID,
dwAdditionalFlags: DWORD,
hTransaction: HANDLE,
) -> HANDLE;
pub fn FindFirstFileTransactedW(
lpFileName: LPCWSTR,
fInfoLevelId: FINDEX_INFO_LEVELS,
lpFindFileData: LPVOID,
fSearchOp: FINDEX_SEARCH_OPS,
lpSearchFilter: LPVOID,
dwAdditionalFlags: DWORD,
hTransaction: HANDLE,
) -> HANDLE;
pub fn CopyFileA(
lpExistingFileName: LPCSTR,
lpNewFileName: LPCSTR,
bFailIfExists: BOOL
) -> BOOL;
pub fn CopyFileW(
lpExistingFileName: LPCWSTR,
lpNewFileName: LPCWSTR,
bFailIfExists: BOOL
) -> BOOL;
}
FN!{stdcall LPPROGRESS_ROUTINE(
TotalFileSize: LARGE_INTEGER,
TotalBytesTransferred: LARGE_INTEGER,
StreamSize: LARGE_INTEGER,
StreamBytesTransferred: LARGE_INTEGER,
dwStreamNumber: DWORD,
dwCallbackReason: DWORD,
hSourceFile: HANDLE,
hDestinationFile: HANDLE,
lpData: LPVOID,
) -> DWORD}
extern "system" {
pub fn CopyFileExA(
lpExistingFileName: LPCSTR,
lpNewFileName: LPCSTR,
lpProgressRoutine: LPPROGRESS_ROUTINE,
lpData: LPVOID,
pbCancel: LPBOOL,
dwCopyFlags: DWORD,
) -> BOOL;
pub fn CopyFileExW(
lpExistingFileName: LPCWSTR,
lpNewFileName: LPCWSTR,
lpProgressRoutine: LPPROGRESS_ROUTINE,
lpData: LPVOID,
pbCancel: LPBOOL,
dwCopyFlags: DWORD,
) -> BOOL;
pub fn CopyFileTransactedA(
        lpExistingFileName: LPCSTR,
        lpNewFileName: LPCSTR,
lpProgressRoutine: LPPROGRESS_ROUTINE,
lpData: LPVOID,
pbCancel: LPBOOL,
dwCopyFlags: DWORD,
hTransaction: HANDLE,
) -> BOOL;
pub fn CopyFileTransactedW(
lpExistingFileName: LPCWSTR,
lpNewFileName: LPCWSTR,
lpProgressRoutine: LPPROGRESS_ROUTINE,
lpData: LPVOID,
pbCancel: LPBOOL,
dwCopyFlags: DWORD,
hTransaction: HANDLE,
) -> BOOL;
}
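// Example (illustrative sketch, not part of winapi-rs): a progress routine
// that lets the copy run to completion, wired into CopyFileExW.
unsafe extern "system" fn copy_progress(
    _total_size: LARGE_INTEGER,
    _total_transferred: LARGE_INTEGER,
    _stream_size: LARGE_INTEGER,
    _stream_transferred: LARGE_INTEGER,
    _stream_number: DWORD,
    _callback_reason: DWORD,
    _source: HANDLE,
    _destination: HANDLE,
    _data: LPVOID,
) -> DWORD {
    PROGRESS_CONTINUE // keep copying; PROGRESS_CANCEL would abort and delete
}

#[allow(dead_code)]
unsafe fn copy_with_progress(src: LPCWSTR, dst: LPCWSTR) -> BOOL {
    let mut cancel: BOOL = 0;
    CopyFileExW(
        src,
        dst,
        Some(copy_progress),
        core::ptr::null_mut(), // no per-copy context data
        &mut cancel,
        COPY_FILE_FAIL_IF_EXISTS,
    )
}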
ENUM!{enum COPYFILE2_MESSAGE_TYPE {
COPYFILE2_CALLBACK_NONE = 0,
COPYFILE2_CALLBACK_CHUNK_STARTED,
COPYFILE2_CALLBACK_CHUNK_FINISHED,
COPYFILE2_CALLBACK_STREAM_STARTED,
COPYFILE2_CALLBACK_STREAM_FINISHED,
COPYFILE2_CALLBACK_POLL_CONTINUE,
COPYFILE2_CALLBACK_ERROR,
COPYFILE2_CALLBACK_MAX,
}}
ENUM!{enum COPYFILE2_MESSAGE_ACTION {
COPYFILE2_PROGRESS_CONTINUE = 0,
COPYFILE2_PROGRESS_CANCEL,
COPYFILE2_PROGRESS_STOP,
COPYFILE2_PROGRESS_QUIET,
COPYFILE2_PROGRESS_PAUSE,
}}
ENUM!{enum COPYFILE2_COPY_PHASE {
COPYFILE2_PHASE_NONE = 0,
COPYFILE2_PHASE_PREPARE_SOURCE,
COPYFILE2_PHASE_PREPARE_DEST,
COPYFILE2_PHASE_READ_SOURCE,
COPYFILE2_PHASE_WRITE_DESTINATION,
COPYFILE2_PHASE_SERVER_COPY,
COPYFILE2_PHASE_NAMEGRAFT_COPY,
COPYFILE2_PHASE_MAX,
}}
STRUCT!{struct COPYFILE2_MESSAGE_ChunkStarted {
dwStreamNumber: DWORD,
dwReserved: DWORD,
hSourceFile: HANDLE,
hDestinationFile: HANDLE,
uliChunkNumber: ULARGE_INTEGER,
uliChunkSize: ULARGE_INTEGER,
uliStreamSize: ULARGE_INTEGER,
uliTotalFileSize: ULARGE_INTEGER,
}}
STRUCT!{struct COPYFILE2_MESSAGE_ChunkFinished {
dwStreamNumber: DWORD,
dwFlags: DWORD,
hSourceFile: HANDLE,
hDestinationFile: HANDLE,
uliChunkNumber: ULARGE_INTEGER,
uliChunkSize: ULARGE_INTEGER,
uliStreamSize: ULARGE_INTEGER,
uliStreamBytesTransferred: ULARGE_INTEGER,
uliTotalFileSize: ULARGE_INTEGER,
uliTotalBytesTransferred: ULARGE_INTEGER,
}}
STRUCT!{struct COPYFILE2_MESSAGE_StreamStarted {
dwStreamNumber: DWORD,
dwReserved: DWORD,
hSourceFile: HANDLE,
hDestinationFile: HANDLE,
uliStreamSize: ULARGE_INTEGER,
uliTotalFileSize: ULARGE_INTEGER,
}}
STRUCT!{struct COPYFILE2_MESSAGE_StreamFinished {
dwStreamNumber: DWORD,
dwReserved: DWORD,
hSourceFile: HANDLE,
hDestinationFile: HANDLE,
uliStreamSize: ULARGE_INTEGER,
uliStreamBytesTransferred: ULARGE_INTEGER,
uliTotalFileSize: ULARGE_INTEGER,
uliTotalBytesTransferred: ULARGE_INTEGER,
}}
STRUCT!{struct COPYFILE2_MESSAGE_PollContinue {
dwReserved: DWORD,
}}
STRUCT!{struct COPYFILE2_MESSAGE_Error {
CopyPhase: COPYFILE2_COPY_PHASE,
dwStreamNumber: DWORD,
hrFailure: HRESULT,
dwReserved: DWORD,
uliChunkNumber: ULARGE_INTEGER,
uliStreamSize: ULARGE_INTEGER,
uliStreamBytesTransferred: ULARGE_INTEGER,
uliTotalFileSize: ULARGE_INTEGER,
uliTotalBytesTransferred: ULARGE_INTEGER,
}}
UNION!{union COPYFILE2_MESSAGE_Info {
[u64; 8] [u64; 9],
ChunkStarted ChunkStarted_mut: COPYFILE2_MESSAGE_ChunkStarted,
ChunkFinished ChunkFinished_mut: COPYFILE2_MESSAGE_ChunkFinished,
StreamStarted StreamStarted_mut: COPYFILE2_MESSAGE_StreamStarted,
StreamFinished StreamFinished_mut: COPYFILE2_MESSAGE_StreamFinished,
PollContinue PollContinue_mut: COPYFILE2_MESSAGE_PollContinue,
Error Error_mut: COPYFILE2_MESSAGE_Error,
}}
STRUCT!{struct COPYFILE2_MESSAGE {
Type: COPYFILE2_MESSAGE_TYPE,
dwPadding: DWORD,
Info: COPYFILE2_MESSAGE_Info,
}}
FN!{stdcall PCOPYFILE2_PROGRESS_ROUTINE(
pMessage: *const COPYFILE2_MESSAGE,
pvCallbackContext: PVOID,
) -> COPYFILE2_MESSAGE_ACTION}
STRUCT!{struct COPYFILE2_EXTENDED_PARAMETERS {
dwSize: DWORD,
dwCopyFlags: DWORD,
pfCancel: *mut BOOL,
pProgressRoutine: PCOPYFILE2_PROGRESS_ROUTINE,
pvCallbackContext: PVOID,
}}
extern "system" {
pub fn CopyFile2(
pwszExistingFileName: PCWSTR,
pwszNewFileName: PCWSTR,
pExtendedParameters: *mut COPYFILE2_EXTENDED_PARAMETERS,
) -> HRESULT;
pub fn MoveFileA(
lpExistingFileName: LPCSTR,
lpNewFileName: LPCSTR
) -> BOOL;
pub fn MoveFileW(
lpExistingFileName: LPCWSTR,
lpNewFileName: LPCWSTR
) -> BOOL;
pub fn MoveFileExA(
lpExistingFileName: LPCSTR,
lpNewFileName: LPCSTR,
dwFlags: DWORD
) -> BOOL;
pub fn MoveFileExW(
lpExistingFileName: LPCWSTR,
lpNewFileName: LPCWSTR,
dwFlags: DWORD
) -> BOOL;
pub fn MoveFileWithProgressA(
lpExistingFileName: LPCSTR,
lpNewFileName: LPCSTR,
lpProgressRoutine: LPPROGRESS_ROUTINE,
lpData: LPVOID,
dwFlags: DWORD,
) -> BOOL;
pub fn MoveFileWithProgressW(
lpExistingFileName: LPCWSTR,
lpNewFileName: LPCWSTR,
lpProgressRoutine: LPPROGRESS_ROUTINE,
lpData: LPVOID,
dwFlags: DWORD,
) -> BOOL;
pub fn MoveFileTransactedA(
lpExistingFileName: LPCSTR,
lpNewFileName: LPCSTR,
lpProgressRoutine: LPPROGRESS_ROUTINE,
lpData: LPVOID,
dwFlags: DWORD,
hTransaction: HANDLE,
) -> BOOL;
pub fn MoveFileTransactedW(
lpExistingFileName: LPCWSTR,
lpNewFileName: LPCWSTR,
lpProgressRoutine: LPPROGRESS_ROUTINE,
lpData: LPVOID,
dwFlags: DWORD,
hTransaction: HANDLE,
) -> BOOL;
}
pub const MOVEFILE_REPLACE_EXISTING: DWORD = 0x00000001;
pub const MOVEFILE_COPY_ALLOWED: DWORD = 0x00000002;
pub const MOVEFILE_DELAY_UNTIL_REBOOT: DWORD = 0x00000004;
pub const MOVEFILE_WRITE_THROUGH: DWORD = 0x00000008;
pub const MOVEFILE_CREATE_HARDLINK: DWORD = 0x00000010;
pub const MOVEFILE_FAIL_IF_NOT_TRACKABLE: DWORD = 0x00000020;
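// Example (illustrative sketch, not part of winapi-rs): a rename that
// overwrites any existing destination and flushes before returning.
#[allow(dead_code)]
unsafe fn replace_rename(from: LPCWSTR, to: LPCWSTR) -> BOOL {
    MoveFileExW(from, to, MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH)
}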
extern "system" {
pub fn ReplaceFileA(
lpReplacedFileName: LPCSTR,
lpReplacementFileName: LPCSTR,
lpBackupFileName: LPCSTR,
dwReplaceFlags: DWORD,
lpExclude: LPVOID,
lpReserved: LPVOID,
    ) -> BOOL;
pub fn ReplaceFileW(
lpReplacedFileName: LPCWSTR,
lpReplacementFileName: LPCWSTR,
lpBackupFileName: LPCWSTR,
dwReplaceFlags: DWORD,
lpExclude: LPVOID,
lpReserved: LPVOID,
    ) -> BOOL;
pub fn CreateHardLinkA(
lpFileName: LPCSTR,
lpExistingFileName: LPCSTR,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
) -> BOOL;
pub fn CreateHardLinkW(
lpFileName: LPCWSTR,
lpExistingFileName: LPCWSTR,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
) -> BOOL;
pub fn CreateHardLinkTransactedA(
lpFileName: LPCSTR,
lpExistingFileName: LPCSTR,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
hTransaction: HANDLE,
) -> BOOL;
pub fn CreateHardLinkTransactedW(
lpFileName: LPCWSTR,
lpExistingFileName: LPCWSTR,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
hTransaction: HANDLE,
    ) -> BOOL;
pub fn FindFirstStreamTransactedW(
lpFileName: LPCWSTR,
InfoLevel: STREAM_INFO_LEVELS,
lpFindStreamData: LPVOID,
dwFlags: DWORD,
hTransaction: HANDLE,
) -> HANDLE;
pub fn FindFirstFileNameTransactedW(
lpFileName: LPCWSTR,
dwFlags: DWORD,
StringLength: LPDWORD,
LinkName: PWSTR,
hTransaction: HANDLE,
) -> HANDLE;
pub fn CreateNamedPipeA(
lpName: LPCSTR,
dwOpenMode: DWORD,
dwPipeMode: DWORD,
nMaxInstances: DWORD,
nOutBufferSize: DWORD,
nInBufferSize: DWORD,
nDefaultTimeOut: DWORD,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
) -> HANDLE;
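    // Example (illustrative, comment-only so this extern block stays valid):
    // a byte-mode duplex pipe server endpoint using the PIPE_* flags above.
    //
    //     let pipe = CreateNamedPipeA(
    //         b"\\\\.\\pipe\\demo\0".as_ptr() as LPCSTR,
    //         PIPE_ACCESS_DUPLEX,
    //         PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
    //         PIPE_UNLIMITED_INSTANCES, 4096, 4096, 0, core::ptr::null_mut());
    //
    // A returned INVALID_HANDLE_VALUE (assumed from um::handleapi) signals
    // failure.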
pub fn GetNamedPipeHandleStateA(
hNamedPipe: HANDLE,
lpState: LPDWORD,
lpCurInstances: LPDWORD,
lpMaxCollectionCount: LPDWORD,
lpCollectDataTimeout: LPDWORD,
lpUserName: LPSTR,
nMaxUserNameSize: DWORD,
) -> BOOL;
pub fn CallNamedPipeA(
lpNamedPipeName: LPCSTR,
lpInBuffer: LPVOID,
nInBufferSize: DWORD,
lpOutBuffer: LPVOID,
nOutBufferSize: DWORD,
lpBytesRead: LPDWORD,
nTimeOut: DWORD,
) -> BOOL;
pub fn WaitNamedPipeA(
lpNamedPipeName: LPCSTR,
nTimeOut: DWORD
) -> BOOL;
pub fn GetNamedPipeClientComputerNameA(
Pipe: HANDLE,
ClientComputerName: LPSTR,
ClientComputerNameLength: ULONG,
) -> BOOL;
pub fn GetNamedPipeClientProcessId(
Pipe: HANDLE,
ClientProcessId: PULONG
) -> BOOL;
pub fn GetNamedPipeClientSessionId(
Pipe: HANDLE,
ClientSessionId: PULONG
) -> BOOL;
pub fn GetNamedPipeServerProcessId(
Pipe: HANDLE,
ServerProcessId: PULONG
) -> BOOL;
pub fn GetNamedPipeServerSessionId(
Pipe: HANDLE,
ServerSessionId: PULONG
) -> BOOL;
pub fn SetVolumeLabelA(
lpRootPathName: LPCSTR,
lpVolumeName: LPCSTR
) -> BOOL;
pub fn SetVolumeLabelW(
lpRootPathName: LPCWSTR,
lpVolumeName: LPCWSTR
) -> BOOL;
pub fn SetFileBandwidthReservation(
hFile: HANDLE,
nPeriodMilliseconds: DWORD,
nBytesPerPeriod: DWORD,
bDiscardable: BOOL,
lpTransferSize: LPDWORD,
lpNumOutstandingRequests: LPDWORD,
) -> BOOL;
pub fn GetFileBandwidthReservation(
hFile: HANDLE,
lpPeriodMilliseconds: LPDWORD,
lpBytesPerPeriod: LPDWORD,
pDiscardable: LPBOOL,
lpTransferSize: LPDWORD,
lpNumOutstandingRequests: LPDWORD,
) -> BOOL;
// pub fn ClearEventLogA();
// pub fn ClearEventLogW();
// pub fn BackupEventLogA();
// pub fn BackupEventLogW();
// pub fn CloseEventLog();
pub fn DeregisterEventSource(
hEventLog: HANDLE,
) -> BOOL;
// pub fn NotifyChangeEventLog();
// pub fn GetNumberOfEventLogRecords();
// pub fn GetOldestEventLogRecord();
// pub fn OpenEventLogA();
// pub fn OpenEventLogW();
pub fn RegisterEventSourceA(
lpUNCServerName: LPCSTR,
lpSourceName: LPCSTR,
) -> HANDLE;
pub fn RegisterEventSourceW(
lpUNCServerName: LPCWSTR,
lpSourceName: LPCWSTR,
) -> HANDLE;
// pub fn OpenBackupEventLogA();
// pub fn OpenBackupEventLogW();
// pub fn ReadEventLogA();
// pub fn ReadEventLogW();
pub fn ReportEventA(
hEventLog: HANDLE,
wType: WORD,
wCategory: WORD,
dwEventID: DWORD,
lpUserSid: PSID,
wNumStrings: WORD,
dwDataSize: DWORD,
lpStrings: *mut LPCSTR,
lpRawData: LPVOID,
) -> BOOL;
pub fn ReportEventW(
hEventLog: HANDLE,
wType: WORD,
wCategory: WORD,
dwEventID: DWORD,
lpUserSid: PSID,
wNumStrings: WORD,
dwDataSize: DWORD,
lpStrings: *mut LPCWSTR,
lpRawData: LPVOID,
) -> BOOL;
// pub fn GetEventLogInformation();
// pub fn OperationStart();
// pub fn OperationEnd();
// pub fn AccessCheckAndAuditAlarmA();
// pub fn AccessCheckByTypeAndAuditAlarmA();
// pub fn AccessCheckByTypeResultListAndAuditAlarmA();
// pub fn AccessCheckByTypeResultListAndAuditAlarmByHandleA();
// pub fn ObjectOpenAuditAlarmA();
// pub fn ObjectPrivilegeAuditAlarmA();
// pub fn ObjectCloseAuditAlarmA();
// pub fn ObjectDeleteAuditAlarmA();
// pub fn PrivilegedServiceAuditAlarmA();
// pub fn AddConditionalAce();
// pub fn SetFileSecurityA();
// pub fn GetFileSecurityA();
pub fn ReadDirectoryChangesW(
hDirectory: HANDLE,
lpBuffer: LPVOID,
nBufferLength: DWORD,
bWatchSubtree: BOOL,
dwNotifyFilter: DWORD,
lpBytesReturned: LPDWORD,
lpOverlapped: LPOVERLAPPED,
lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE,
) -> BOOL;
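    // Example (illustrative, comment-only so this extern block stays valid):
    // synchronous directory watching, with lpOverlapped and the completion
    // routine both left empty. FILE_NOTIFY_CHANGE_* filters are assumed to
    // come from um::winnt.
    //
    //     let mut buf = [0u8; 1024];
    //     let mut returned: DWORD = 0;
    //     ReadDirectoryChangesW(dir, buf.as_mut_ptr() as LPVOID,
    //         buf.len() as DWORD, 1, FILE_NOTIFY_CHANGE_FILE_NAME,
    //         &mut returned, core::ptr::null_mut(), None);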
pub fn MapViewOfFileExNuma(
hFileMappingObject: HANDLE,
dwDesiredAccess: DWORD,
dwFileOffsetHigh: DWORD,
dwFileOffsetLow: DWORD,
dwNumberOfBytesToMap: SIZE_T,
lpBaseAddress: LPVOID,
nndPreferred: DWORD,
) -> LPVOID;
pub fn IsBadReadPtr(
lp: *const VOID,
ucb: UINT_PTR
) -> BOOL;
pub fn IsBadWritePtr(
lp: LPVOID,
ucb: UINT_PTR
) -> BOOL;
pub fn IsBadHugeReadPtr(
lp: *const VOID,
ucb: UINT_PTR
) -> BOOL;
pub fn IsBadHugeWritePtr(
lp: LPVOID,
ucb: UINT_PTR
) -> BOOL;
pub fn IsBadCodePtr(
lpfn: FARPROC
) -> BOOL;
pub fn IsBadStringPtrA(
lpsz: LPCSTR,
ucchMax: UINT_PTR
) -> BOOL;
pub fn IsBadStringPtrW(
lpsz: LPCWSTR,
ucchMax: UINT_PTR
) -> BOOL;
// pub fn LookupAccountSidA();
// pub fn LookupAccountSidW();
pub fn LookupAccountNameA(
lpSystemName: LPCSTR,
lpAccountName: LPCSTR,
Sid: PSID,
cbSid: LPDWORD,
ReferencedDomainName: LPCSTR,
cchReferencedDomainName: LPDWORD,
peUse: PSID_NAME_USE,
) -> BOOL;
pub fn LookupAccountNameW(
lpSystemName: LPCWSTR,
lpAccountName: LPCWSTR,
Sid: PSID,
cbSid: LPDWORD,
ReferencedDomainName: LPCWSTR,
cchReferencedDomainName: LPDWORD,
peUse: PSID_NAME_USE,
) -> BOOL;
// pub fn LookupAccountNameLocalA();
// pub fn LookupAccountNameLocalW();
// pub fn LookupAccountSidLocalA();
// pub fn LookupAccountSidLocalW();
pub fn LookupPrivilegeValueA(
lpSystemName: LPCSTR,
lpName: LPCSTR,
lpLuid: PLUID,
) -> BOOL;
pub fn LookupPrivilegeValueW(
lpSystemName: LPCWSTR,
lpName: LPCWSTR,
lpLuid: PLUID,
) -> BOOL;
pub fn LookupPrivilegeNameA(
lpSystemName: LPCSTR,
lpLuid: PLUID,
lpName: LPSTR,
cchName: LPDWORD,
) -> BOOL;
pub fn LookupPrivilegeNameW(
lpSystemName: LPCWSTR,
lpLuid: PLUID,
lpName: LPWSTR,
cchName: LPDWORD,
) -> BOOL;
// pub fn LookupPrivilegeDisplayNameA();
// pub fn LookupPrivilegeDisplayNameW();
pub fn BuildCommDCBA(
lpDef: LPCSTR,
lpDCB: LPDCB
) -> BOOL;
pub fn BuildCommDCBW(
lpDef: LPCWSTR,
lpDCB: LPDCB
) -> BOOL;
pub fn BuildCommDCBAndTimeoutsA(
lpDef: LPCSTR,
lpDCB: LPDCB,
lpCommTimeouts: LPCOMMTIMEOUTS,
) -> BOOL;
pub fn BuildCommDCBAndTimeoutsW(
lpDef: LPCWSTR,
lpDCB: LPDCB,
lpCommTimeouts: LPCOMMTIMEOUTS,
) -> BOOL;
pub fn CommConfigDialogA(
lpszName: LPCSTR,
hWnd: HWND,
lpCC: LPCOMMCONFIG
) -> BOOL;
pub fn CommConfigDialogW(
lpszName: LPCWSTR,
hWnd: HWND,
lpCC: LPCOMMCONFIG
) -> BOOL;
pub fn GetDefaultCommConfigA(
lpszName: LPCSTR,
lpCC: LPCOMMCONFIG,
lpdwSize: LPDWORD
) -> BOOL;
pub fn GetDefaultCommConfigW(
lpszName: LPCWSTR,
lpCC: LPCOMMCONFIG,
lpdwSize: LPDWORD
) -> BOOL;
pub fn SetDefaultCommConfigA(
lpszName: LPCSTR,
lpCC: LPCOMMCONFIG,
dwSize: DWORD
) -> BOOL;
pub fn SetDefaultCommConfigW(
lpszName: LPCWSTR,
lpCC: LPCOMMCONFIG,
dwSize: DWORD
) -> BOOL;
pub fn GetComputerNameA(
lpBuffer: LPSTR,
nSize: LPDWORD
) -> BOOL;
pub fn GetComputerNameW(
lpBuffer: LPWSTR,
nSize: LPDWORD
) -> BOOL;
pub fn DnsHostnameToComputerNameA(
Hostname: LPCSTR,
ComputerName: LPCSTR,
nSize: LPDWORD,
) -> BOOL;
pub fn DnsHostnameToComputerNameW(
Hostname: LPCWSTR,
ComputerName: LPWSTR,
nSize: LPDWORD,
) -> BOOL;
pub fn GetUserNameA(
lpBuffer: LPSTR,
pcbBuffer: LPDWORD
) -> BOOL;
pub fn GetUserNameW(
lpBuffer: LPWSTR,
pcbBuffer: LPDWORD
) -> BOOL;
}
pub const LOGON32_LOGON_INTERACTIVE: DWORD = 2;
pub const LOGON32_LOGON_NETWORK: DWORD = 3;
pub const LOGON32_LOGON_BATCH: DWORD = 4;
pub const LOGON32_LOGON_SERVICE: DWORD = 5;
pub const LOGON32_LOGON_UNLOCK: DWORD = 7;
pub const LOGON32_LOGON_NETWORK_CLEARTEXT: DWORD = 8;
pub const LOGON32_LOGON_NEW_CREDENTIALS: DWORD = 9;
pub const LOGON32_PROVIDER_DEFAULT: DWORD = 0;
pub const LOGON32_PROVIDER_WINNT35: DWORD = 1;
pub const LOGON32_PROVIDER_WINNT40: DWORD = 2;
pub const LOGON32_PROVIDER_WINNT50: DWORD = 3;
pub const LOGON32_PROVIDER_VIRTUAL: DWORD = 4;
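// Example (illustrative sketch, not part of winapi-rs): an interactive logon.
// `user`, `domain`, and `password` are assumed NUL-terminated UTF-16 buffers;
// a non-null returned token must eventually be closed with CloseHandle
// (declared in um::handleapi, not here).
#[allow(dead_code)]
unsafe fn logon_example(user: LPCWSTR, domain: LPCWSTR, password: LPCWSTR) -> HANDLE {
    let mut token: HANDLE = core::ptr::null_mut();
    let ok = LogonUserW(
        user,
        domain,
        password,
        LOGON32_LOGON_INTERACTIVE,
        LOGON32_PROVIDER_DEFAULT,
        &mut token,
    );
    if ok == 0 { core::ptr::null_mut() } else { token }
}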
extern "system" {
pub fn LogonUserA(
lpUsername: LPCSTR,
lpDomain: LPCSTR,
lpPassword: LPCSTR,
dwLogonType: DWORD,
dwLogonProvider: DWORD,
phToken: PHANDLE
) -> BOOL;
pub fn LogonUserW(
lpUsername: LPCWSTR,
lpDomain: LPCWSTR,
lpPassword: LPCWSTR,
dwLogonType: DWORD,
dwLogonProvider: DWORD,
phToken: PHANDLE
) -> BOOL;
pub fn LogonUserExA(
lpUsername: LPCSTR,
lpDomain: LPCSTR,
lpPassword: LPCSTR,
dwLogonType: DWORD,
dwLogonProvider: DWORD,
phToken: PHANDLE,
ppLogonSid: *mut PSID,
ppProfileBuffer: *mut PVOID,
pdwProfileLength: LPDWORD,
pQuotaLimits: PQUOTA_LIMITS,
) -> BOOL;
pub fn LogonUserExW(
lpUsername: LPCWSTR,
lpDomain: LPCWSTR,
lpPassword: LPCWSTR,
dwLogonType: DWORD,
dwLogonProvider: DWORD,
phToken: PHANDLE,
ppLogonSid: *mut PSID,
ppProfileBuffer: *mut PVOID,
pdwProfileLength: LPDWORD,
pQuotaLimits: PQUOTA_LIMITS,
) -> BOOL;
// pub fn CreateProcessWithLogonW();
// pub fn CreateProcessWithTokenW();
// pub fn IsTokenUntrusted();
pub fn RegisterWaitForSingleObject(
phNewWaitObject: PHANDLE,
hObject: HANDLE,
Callback: WAITORTIMERCALLBACK,
Context: PVOID,
dwMilliseconds: ULONG,
dwFlags: ULONG,
) -> BOOL;
pub fn UnregisterWait(
WaitHandle: HANDLE
) -> BOOL;
pub fn BindIoCompletionCallback(
FileHandle: HANDLE,
Function: LPOVERLAPPED_COMPLETION_ROUTINE,
Flags: ULONG,
) -> BOOL;
pub fn SetTimerQueueTimer(
TimerQueue: HANDLE,
Callback: WAITORTIMERCALLBACK,
Parameter: PVOID,
DueTime: DWORD,
Period: DWORD,
PreferIo: BOOL,
) -> HANDLE;
pub fn CancelTimerQueueTimer(
TimerQueue: HANDLE,
Timer: HANDLE
) -> BOOL;
pub fn DeleteTimerQueue(
TimerQueue: HANDLE
) -> BOOL;
// pub fn InitializeThreadpoolEnvironment();
// pub fn SetThreadpoolCallbackPool();
// pub fn SetThreadpoolCallbackCleanupGroup();
// pub fn SetThreadpoolCallbackRunsLong();
// pub fn SetThreadpoolCallbackLibrary();
// pub fn SetThreadpoolCallbackPriority();
// pub fn DestroyThreadpoolEnvironment();
// pub fn SetThreadpoolCallbackPersistent();
pub fn CreatePrivateNamespaceA(
lpPrivateNamespaceAttributes: LPSECURITY_ATTRIBUTES,
lpBoundaryDescriptor: LPVOID,
lpAliasPrefix: LPCSTR,
) -> HANDLE;
pub fn OpenPrivateNamespaceA(
lpBoundaryDescriptor: LPVOID,
lpAliasPrefix: LPCSTR
) -> HANDLE;
pub fn CreateBoundaryDescriptorA(
Name: LPCSTR,
Flags: ULONG
) -> HANDLE;
pub fn AddIntegrityLabelToBoundaryDescriptor(
BoundaryDescriptor: *mut HANDLE,
IntegrityLabel: PSID,
) -> BOOL;
}
pub const HW_PROFILE_GUIDLEN: usize = 39;
pub const MAX_PROFILE_LEN: usize = 80;
pub const DOCKINFO_UNDOCKED: DWORD = 0x1;
pub const DOCKINFO_DOCKED: DWORD = 0x2;
pub const DOCKINFO_USER_SUPPLIED: DWORD = 0x4;
pub const DOCKINFO_USER_UNDOCKED: DWORD = DOCKINFO_USER_SUPPLIED | DOCKINFO_UNDOCKED;
pub const DOCKINFO_USER_DOCKED: DWORD = DOCKINFO_USER_SUPPLIED | DOCKINFO_DOCKED;
STRUCT!{struct HW_PROFILE_INFOA {
dwDockInfo: DWORD,
szHwProfileGuid: [CHAR; HW_PROFILE_GUIDLEN],
szHwProfileName: [CHAR; MAX_PROFILE_LEN],
}}
pub type LPHW_PROFILE_INFOA = *mut HW_PROFILE_INFOA;
STRUCT!{struct HW_PROFILE_INFOW {
dwDockInfo: DWORD,
szHwProfileGuid: [WCHAR; HW_PROFILE_GUIDLEN],
szHwProfileName: [WCHAR; MAX_PROFILE_LEN],
}}
pub type LPHW_PROFILE_INFOW = *mut HW_PROFILE_INFOW;
extern "system" {
pub fn GetCurrentHwProfileA(
lpHwProfileInfo: LPHW_PROFILE_INFOA
) -> BOOL;
pub fn GetCurrentHwProfileW(
lpHwProfileInfo: LPHW_PROFILE_INFOW
) -> BOOL;
pub fn VerifyVersionInfoA(
lpVersionInformation: LPOSVERSIONINFOEXA,
dwTypeMask: DWORD,
dwlConditionMask: DWORDLONG,
) -> BOOL;
pub fn VerifyVersionInfoW(
lpVersionInformation: LPOSVERSIONINFOEXW,
dwTypeMask: DWORD,
dwlConditionMask: DWORDLONG,
) -> BOOL;
}
STRUCT!{struct SYSTEM_POWER_STATUS {
ACLineStatus: BYTE,
BatteryFlag: BYTE,
BatteryLifePercent: BYTE,
Reserved1: BYTE,
BatteryLifeTime: DWORD,
BatteryFullLifeTime: DWORD,
}}
pub type LPSYSTEM_POWER_STATUS = *mut SYSTEM_POWER_STATUS;
extern "system" {
pub fn GetSystemPowerStatus(
lpSystemPowerStatus: LPSYSTEM_POWER_STATUS
) -> BOOL;
pub fn SetSystemPowerState(
fSuspend: BOOL,
fForce: BOOL
) -> BOOL;
pub fn MapUserPhysicalPagesScatter(
VirtualAddresses: *mut PVOID,
NumberOfPages: ULONG_PTR,
PageArray: PULONG_PTR,
) -> BOOL;
pub fn CreateJobObjectA(
lpJobAttributes: LPSECURITY_ATTRIBUTES,
lpName: LPCSTR
) -> HANDLE;
pub fn OpenJobObjectA(
dwDesiredAccess: DWORD,
bInheritHandle: BOOL,
lpName: LPCSTR
) -> HANDLE;
pub fn CreateJobSet(
NumJob: ULONG,
UserJobSet: PJOB_SET_ARRAY,
Flags: ULONG
) -> BOOL;
pub fn FindFirstVolumeA(
lpszVolumeName: LPSTR,
cchBufferLength: DWORD
) -> HANDLE;
pub fn FindNextVolumeA(
hFindVolume: HANDLE,
lpszVolumeName: LPSTR,
cchBufferLength: DWORD,
) -> BOOL;
pub fn FindFirstVolumeMountPointA(
lpszRootPathName: LPCSTR,
lpszVolumeMountPoint: LPSTR,
cchBufferLength: DWORD,
) -> HANDLE;
pub fn FindFirstVolumeMountPointW(
lpszRootPathName: LPCWSTR,
lpszVolumeMountPoint: LPWSTR,
cchBufferLength: DWORD,
) -> HANDLE;
pub fn FindNextVolumeMountPointA(
hFindVolumeMountPoint: HANDLE,
lpszVolumeMountPoint: LPSTR,
cchBufferLength: DWORD,
) -> BOOL;
pub fn FindNextVolumeMountPointW(
hFindVolumeMountPoint: HANDLE,
lpszVolumeMountPoint: LPWSTR,
cchBufferLength: DWORD,
) -> BOOL;
pub fn FindVolumeMountPointClose(
hFindVolumeMountPoint: HANDLE
) -> BOOL;
pub fn SetVolumeMountPointA(
lpszVolumeMountPoint: LPCSTR,
lpszVolumeName: LPCSTR
) -> BOOL;
pub fn SetVolumeMountPointW(
lpszVolumeMountPoint: LPCWSTR,
lpszVolumeName: LPCWSTR
) -> BOOL;
pub fn DeleteVolumeMountPointA(
lpszVolumeMountPoint: LPCSTR
) -> BOOL;
pub fn GetVolumeNameForVolumeMountPointA(
lpszVolumeMountPoint: LPCSTR,
lpszVolumeName: LPSTR,
cchBufferLength: DWORD,
) -> BOOL;
pub fn GetVolumePathNameA(
lpszFileName: LPCSTR,
lpszVolumePathName: LPSTR,
cchBufferLength: DWORD,
) -> BOOL;
pub fn GetVolumePathNamesForVolumeNameA(
lpszVolumeName: LPCSTR,
lpszVolumePathNames: LPCH,
cchBufferLength: DWORD,
lpcchReturnLength: PDWORD,
) -> BOOL;
}
// ACTCTX_FLAG_*
STRUCT!{struct ACTCTXA {
cbSize: ULONG,
dwFlags: DWORD,
lpSource: LPCSTR,
wProcessorArchitecture: USHORT,
wLangId: LANGID,
lpAssemblyDirectory: LPCSTR,
lpResourceName: LPCSTR,
lpApplicationName: LPCSTR,
hModule: HMODULE,
}}
pub type PACTCTXA = *mut ACTCTXA;
STRUCT!{struct ACTCTXW {
cbSize: ULONG,
dwFlags: DWORD,
lpSource: LPCWSTR,
wProcessorArchitecture: USHORT,
wLangId: LANGID,
lpAssemblyDirectory: LPCWSTR,
lpResourceName: LPCWSTR,
lpApplicationName: LPCWSTR,
hModule: HMODULE,
}}
pub type PACTCTXW = *mut ACTCTXW;
pub type PCACTCTXA = *const ACTCTXA;
pub type PCACTCTXW = *const ACTCTXW;
extern "system" {
pub fn CreateActCtxA(
pActCtx: PCACTCTXA
) -> HANDLE;
pub fn CreateActCtxW(
pActCtx: PCACTCTXW
) -> HANDLE;
pub fn AddRefActCtx(
hActCtx: HANDLE
);
pub fn ReleaseActCtx(
hActCtx: HANDLE
);
pub fn ZombifyActCtx(
hActCtx: HANDLE
) -> BOOL;
pub fn ActivateActCtx(
hActCtx: HANDLE,
lpCookie: *mut ULONG_PTR
) -> BOOL;
pub fn DeactivateActCtx(
dwFlags: DWORD,
ulCookie: ULONG_PTR
) -> BOOL;
pub fn GetCurrentActCtx(
lphActCtx: *mut HANDLE
) -> BOOL;
}
STRUCT!{struct ACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA {
lpInformation: PVOID,
lpSectionBase: PVOID,
ulSectionLength: ULONG,
lpSectionGlobalDataBase: PVOID,
ulSectionGlobalDataLength: ULONG,
}}
pub type PACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA =
*mut ACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA;
pub type PCACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA =
*const ACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA;
STRUCT!{struct ACTCTX_SECTION_KEYED_DATA {
cbSize: ULONG,
ulDataFormatVersion: ULONG,
lpData: PVOID,
ulLength: ULONG,
lpSectionGlobalData: PVOID,
ulSectionGlobalDataLength: ULONG,
lpSectionBase: PVOID,
ulSectionTotalLength: ULONG,
hActCtx: HANDLE,
ulAssemblyRosterIndex: ULONG,
ulFlags: ULONG,
AssemblyMetadata: ACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA,
}}
pub type PACTCTX_SECTION_KEYED_DATA = *mut ACTCTX_SECTION_KEYED_DATA;
pub type PCACTCTX_SECTION_KEYED_DATA = *const ACTCTX_SECTION_KEYED_DATA;
extern "system" {
pub fn FindActCtxSectionStringA(
dwFlags: DWORD,
lpExtensionGuid: *const GUID,
ulSectionId: ULONG,
lpStringToFind: LPCSTR,
ReturnedData: PACTCTX_SECTION_KEYED_DATA,
) -> BOOL;
pub fn FindActCtxSectionStringW(
dwFlags: DWORD,
lpExtensionGuid: *const GUID,
ulSectionId: ULONG,
lpStringToFind: LPCWSTR,
ReturnedData: PACTCTX_SECTION_KEYED_DATA,
) -> BOOL;
pub fn FindActCtxSectionGuid(
dwFlags: DWORD,
lpExtensionGuid: *const GUID,
ulSectionId: ULONG,
lpGuidToFind: *const GUID,
ReturnedData: PACTCTX_SECTION_KEYED_DATA,
) -> BOOL;
pub fn QueryActCtxW(
dwFlags: DWORD,
hActCtx: HANDLE,
pvSubInstance: PVOID,
ulInfoClass: ULONG,
pvBuffer: PVOID,
cbBuffer: SIZE_T,
pcbWrittenOrRequired: *mut SIZE_T,
) -> BOOL;
pub fn WTSGetActiveConsoleSessionId() -> DWORD;
// pub fn WTSGetServiceSessionId();
// pub fn WTSIsServerContainer();
pub fn GetActiveProcessorGroupCount() -> WORD;
pub fn GetMaximumProcessorGroupCount() -> WORD;
pub fn GetActiveProcessorCount(
GroupNumber: WORD
) -> DWORD;
pub fn GetMaximumProcessorCount(
GroupNumber: WORD
) -> DWORD;
pub fn GetNumaProcessorNode(
Processor: UCHAR,
NodeNumber: PUCHAR
) -> BOOL;
pub fn GetNumaNodeNumberFromHandle(
hFile: HANDLE,
NodeNumber: PUSHORT
) -> BOOL;
pub fn GetNumaProcessorNodeEx(
Processor: PPROCESSOR_NUMBER,
NodeNumber: PUSHORT
) -> BOOL;
pub fn GetNumaNodeProcessorMask(
Node: UCHAR,
ProcessorMask: PULONGLONG
) -> BOOL;
pub fn GetNumaAvailableMemoryNode(
Node: UCHAR,
AvailableBytes: PULONGLONG
) -> BOOL;
pub fn GetNumaAvailableMemoryNodeEx(
Node: USHORT,
AvailableBytes: PULONGLONG,
) -> BOOL;
pub fn GetNumaProximityNode(
ProximityId: ULONG,
NodeNumber: PUCHAR
) -> BOOL;
}
FN!{stdcall APPLICATION_RECOVERY_CALLBACK(
pvParameter: PVOID,
) -> DWORD}
// RESTART_*
// RECOVERY_*
extern "system" {
pub fn RegisterApplicationRecoveryCallback(
        pRecoveryCallback: APPLICATION_RECOVERY_CALLBACK,
pvParameter: PVOID,
dwPingInterval: DWORD,
dwFlags: DWORD,
) -> HRESULT;
pub fn UnregisterApplicationRecoveryCallback() -> HRESULT;
pub fn RegisterApplicationRestart(
pwzCommandline: PCWSTR,
dwFlags: DWORD,
) -> HRESULT;
pub fn UnregisterApplicationRestart() -> HRESULT;
pub fn GetApplicationRecoveryCallback(
hProcess: HANDLE,
pRecoveryCallback: *mut APPLICATION_RECOVERY_CALLBACK,
ppvParameter: *mut PVOID,
pdwPingInterval: PDWORD,
pdwFlags: PDWORD,
) -> HRESULT;
pub fn GetApplicationRestartSettings(
hProcess: HANDLE,
pwzCommandline: PWSTR,
pcchSize: PDWORD,
pdwFlags: PDWORD,
) -> HRESULT;
pub fn ApplicationRecoveryInProgress(
pbCancelled: PBOOL
) -> HRESULT;
pub fn ApplicationRecoveryFinished(
bSuccess: BOOL
);
}
// FILE_BASIC_INFO, etc.
extern "system" {
pub fn GetFileInformationByHandleEx(
hFile: HANDLE,
FileInformationClass: FILE_INFO_BY_HANDLE_CLASS,
lpFileInformation: LPVOID,
dwBufferSize: DWORD,
) -> BOOL;
}
ENUM!{enum FILE_ID_TYPE {
FileIdType,
ObjectIdType,
ExtendedFileIdType,
MaximumFileIdType,
}}
UNION!{union FILE_ID_DESCRIPTOR_u {
[u64; 2],
FileId FileId_mut: LARGE_INTEGER,
ObjectId ObjectId_mut: GUID,
ExtendedFileId ExtendedFileId_mut: FILE_ID_128,
}}
STRUCT!{struct FILE_ID_DESCRIPTOR {
dwSize: DWORD,
Type: FILE_ID_TYPE,
u: FILE_ID_DESCRIPTOR_u,
}}
pub type LPFILE_ID_DESCRIPTOR = *mut FILE_ID_DESCRIPTOR;
extern "system" {
pub fn OpenFileById(
hVolumeHint: HANDLE,
lpFileId: LPFILE_ID_DESCRIPTOR,
dwDesiredAccess: DWORD,
dwShareMode: DWORD,
lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
dwFlagsAndAttributes: DWORD,
) -> HANDLE;
pub fn CreateSymbolicLinkA(
lpSymlinkFileName: LPCSTR,
lpTargetFileName: LPCSTR,
dwFlags: DWORD,
) -> BOOLEAN;
pub fn CreateSymbolicLinkW(
lpSymlinkFileName: LPCWSTR,
lpTargetFileName: LPCWSTR,
dwFlags: DWORD,
) -> BOOLEAN;
pub fn QueryActCtxSettingsW(
dwFlags: DWORD,
hActCtx: HANDLE,
settingsNameSpace: PCWSTR,
settingName: PCWSTR,
pvBuffer: PWSTR,
dwBuffer: SIZE_T,
pdwWrittenOrRequired: *mut SIZE_T,
) -> BOOL;
pub fn CreateSymbolicLinkTransactedA(
lpSymlinkFileName: LPCSTR,
lpTargetFileName: LPCSTR,
dwFlags: DWORD,
hTransaction: HANDLE,
) -> BOOLEAN;
pub fn CreateSymbolicLinkTransactedW(
lpSymlinkFileName: LPCWSTR,
lpTargetFileName: LPCWSTR,
dwFlags: DWORD,
hTransaction: HANDLE,
) -> BOOLEAN;
pub fn ReplacePartitionUnit(
TargetPartition: PWSTR,
SparePartition: PWSTR,
Flags: ULONG,
) -> BOOL;
pub fn AddSecureMemoryCacheCallback(
pfnCallBack: PSECURE_MEMORY_CACHE_CALLBACK
) -> BOOL;
pub fn RemoveSecureMemoryCacheCallback(
pfnCallBack: PSECURE_MEMORY_CACHE_CALLBACK
) -> BOOL;
pub fn CopyContext(
Destination: PCONTEXT,
ContextFlags: DWORD,
Source: PCONTEXT
) -> BOOL;
pub fn InitializeContext(
Buffer: PVOID,
ContextFlags: DWORD,
Context: *mut PCONTEXT,
ContextLength: PDWORD,
) -> BOOL;
pub fn GetEnabledXStateFeatures() -> DWORD64;
pub fn GetXStateFeaturesMask(
Context: PCONTEXT,
FeatureMask: PDWORD64
) -> BOOL;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn LocateXStateFeature(
Context: PCONTEXT,
FeatureId: DWORD,
Length: PDWORD
) -> PVOID;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn SetXStateFeaturesMask(
Context: PCONTEXT,
FeatureMask: DWORD64
) -> BOOL;
pub fn EnableThreadProfiling(
ThreadHandle: HANDLE,
Flags: DWORD,
HardwareCounters: DWORD64,
PerformanceDataHandle: *mut HANDLE,
) -> BOOL;
pub fn DisableThreadProfiling(
PerformanceDataHandle: HANDLE
) -> DWORD;
pub fn QueryThreadProfiling(
ThreadHandle: HANDLE,
Enabled: PBOOLEAN
) -> DWORD;
pub fn ReadThreadProfilingData(
PerformanceDataHandle: HANDLE,
Flags: DWORD,
PerformanceData: PPERFORMANCE_DATA,
) -> DWORD;
// intrinsic InterlockedIncrement
// intrinsic InterlockedDecrement
// intrinsic InterlockedExchange
// intrinsic InterlockedExchangeAdd
// intrinsic InterlockedExchangeSubtract
// intrinsic InterlockedCompareExchange
// intrinsic InterlockedAnd
// intrinsic InterlockedOr
// intrinsic InterlockedXor
}
| 29.709654 | 102 | 0.655807 |
f5e038060455fdb3e549302f21b55f193c2f2559 | 26,163 | // Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use alloc::sync::Arc;
use alloc::vec::Vec;
use super::super::qlib::common::*;
use super::super::qlib::linux_def::*;
use super::super::socket::socket::*;
use super::super::task::*;
//use super::super::socket::control::*;
//use super::super::socket::control::ControlMessage;
use super::super::fs::file::*;
use super::super::fs::flags::*;
use super::super::kernel::fd_table::*;
use super::super::kernel::time::*;
use super::super::qlib::linux::time::*;
use super::super::syscalls::syscalls::*;
//use super::super::qlib::linux::socket::*;
use super::super::kernel::timer::*;
// minListenBacklog is the minimum reasonable backlog for listening sockets.
const MIN_LISTEN_BACKLOG: u32 = 8;
// maxListenBacklog is the maximum allowed backlog for listening sockets.
const MAX_LISTEN_BACKLOG: u32 = 1024;
// maxAddrLen is the maximum socket address length we're willing to accept.
const MAX_ADDR_LEN: u32 = 200;
// maxOptLen is the maximum sockopt parameter length we're willing to accept.
const MAX_OPT_LEN: u32 = 1024;
// maxControlLen is the maximum length of the msghdr.msg_control buffer we're
// willing to accept. Note that this limit is smaller than Linux, which allows
// buffers up to INT_MAX.
const MAX_CONTROL_LEN: usize = 10 * 1024 * 1024;
// nameLenOffset is the offset from the start of the MessageHeader64 struct to
// the NameLen field.
const NAME_LEN_OFFSET: u32 = 8;
// controlLenOffset is the offset from the start of the MessageHeader64 struct
// to the ControlLen field.
const CONTROL_LEN_OFFSET: u32 = 40;
// flagsOffset is the offset from the start of the MessageHeader64 struct
// to the Flags field.
const FLAGS_OFFSET: u32 = 48;
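// For reference, the offsets above are consistent with a layout like the one
// sketched below. This is only an illustration inferred from the constants;
// the real MsgHdr/MessageHeader64 definition lives elsewhere in this crate
// and its field names may differ.
//
// #[repr(C)]
// struct MessageHeader64 {
//     msg_name: u64,         // offset 0: pointer to the address buffer
//     name_len: u32,         // offset 8: NAME_LEN_OFFSET
//     _pad0: u32,
//     iov: u64,              // offset 16: pointer to the iovec array
//     iov_len: u64,          // offset 24: number of iovecs
//     msg_control: u64,      // offset 32: pointer to the control buffer
//     msg_control_len: u64,  // offset 40: CONTROL_LEN_OFFSET
//     msg_flags: i32,        // offset 48: FLAGS_OFFSET
//     _pad1: i32,
// }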
pub fn SysSocket(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let domain = args.arg0 as i32;
let stype = args.arg1 as i32;
let protocol = args.arg2 as i32;
if stype & !(0xf | SocketFlags::SOCK_CLOEXEC | SocketFlags::SOCK_NONBLOCK) != 0 {
return Err(Error::SysError(SysErr::EINVAL));
}
let s = NewSocket(task, domain, stype & 0xf, protocol)?;
let flags = SettableFileFlags {
NonBlocking: stype & Flags::O_NONBLOCK != 0,
..Default::default()
};
s.SetFlags(task, flags);
s.flags.lock().0.NonSeekable = true;
let fd = task.NewFDFrom(
0,
&s,
&FDFlags {
CloseOnExec: stype & SocketFlags::SOCK_CLOEXEC != 0,
},
)?;
return Ok(fd as i64);
}
pub fn SysSocketPair(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let domain = args.arg0 as i32;
let stype = args.arg1 as i32;
let protocol = args.arg2 as i32;
let socks = args.arg3 as u64;
if stype & !(0xf | SocketFlags::SOCK_CLOEXEC | SocketFlags::SOCK_NONBLOCK) != 0 {
return Err(Error::SysError(SysErr::EINVAL));
}
let fileFlags = SettableFileFlags {
NonBlocking: stype & Flags::O_NONBLOCK != 0,
..Default::default()
};
let fdFlags = FDFlags {
CloseOnExec: stype & SocketFlags::SOCK_CLOEXEC != 0,
};
let (s1, s2) = NewPair(task, domain, stype & 0xf, protocol)?;
s1.SetFlags(task, fileFlags);
s1.flags.lock().0.NonSeekable = true;
s2.SetFlags(task, fileFlags);
s2.flags.lock().0.NonSeekable = true;
let fd1 = task.NewFDFrom(0, &s1, &fdFlags)?;
let fd2 = task.NewFDFrom(0, &s2, &fdFlags)?;
let fds = [fd1, fd2];
task.CopyOutSlice(&fds, socks, 2)?;
return Ok(0);
}
pub fn CaptureAddress(task: &Task, addr: u64, addrlen: u32) -> Result<Vec<u8>> {
if addrlen > MAX_ADDR_LEN {
return Err(Error::SysError(SysErr::EINVAL));
}
//task.CheckPermission(addr, addrlen as u64, false, false)?;
return task.CopyInVec(addr, addrlen as usize);
}
#[derive(Debug)]
pub struct SockaddrIn {
pub sin_family: u16,
pub sin_port: u16,
pub sin_addr: [u8; 4],
pub sin_zero: [u8; 8],
}
pub fn SysConnect(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let addr = args.arg1 as u64;
let addrlen = args.arg2 as u32;
if addrlen > MAX_ADDR_LEN {
return Err(Error::SysError(SysErr::EINVAL));
}
let file = task.GetFile(fd)?;
let blocking = !file.Flags().NonBlocking;
let sock = file.FileOp.clone();
let addrstr = CaptureAddress(task, addr, addrlen)?;
sock.Connect(task, &addrstr, blocking)?;
return Ok(0);
}
pub fn SysAccept4(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let addr = args.arg1 as u64;
let addrlen = args.arg2 as u64;
let flags = args.arg3 as i32;
return Accept4(task, fd, addr, addrlen, flags);
}
pub fn Accept4(task: &Task, fd: i32, addr: u64, addrlen: u64, flags: i32) -> Result<i64> {
if flags & !(SocketFlags::SOCK_CLOEXEC | SocketFlags::SOCK_NONBLOCK) != 0 {
return Err(Error::SysError(SysErr::EINVAL));
}
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
let blocking = !file.Flags().NonBlocking;
let len = if addrlen == 0 {
0
} else {
let len = task.CopyInObj::<i32>(addrlen)?;
if len < 0 {
return Err(Error::SysError(SysErr::EINVAL));
}
len as u32
};
let mut addrstr: [u8; MAX_ADDR_LEN as usize] = [0; MAX_ADDR_LEN as usize];
let mut len = if len < MAX_ADDR_LEN {
len
} else {
MAX_ADDR_LEN as u32
};
let lenCopy = len;
let peerRequested = len != 0;
let addrstr = &mut addrstr[..len as usize];
let nfd = match sock.Accept(task, addrstr, &mut len, flags, blocking) {
Err(Error::ErrInterrupted) => return Err(Error::SysError(SysErr::ERESTARTSYS)),
Err(e) => return Err(e),
Ok(nfd) => nfd,
};
if peerRequested {
task.CopyOutSlice(addrstr, addr, lenCopy as usize)?;
//*task.GetTypeMut::<i32>(addrlen)? = len as i32;
task.CopyOutObj(&(len as i32), addrlen)?
}
return Ok(nfd);
}
pub fn SysAccept(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let addr = args.arg1 as u64;
let addrlen = args.arg2 as u64;
return Accept4(task, fd, addr, addrlen, 0);
}
pub fn SysBind(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let addr = args.arg1 as u64;
let addrlen = args.arg2 as usize;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
if addrlen > MAX_ADDR_LEN as usize {
return Err(Error::SysError(SysErr::EINVAL));
}
let addrstr = task.CopyInVec(addr, addrlen as usize)?;
let res = sock.Bind(task, &addrstr);
return res;
}
pub fn SysListen(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let backlog = args.arg1 as i32;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
let mut backlog = backlog;
if backlog <= 0 {
backlog = MIN_LISTEN_BACKLOG as i32;
}
if backlog >= MAX_LISTEN_BACKLOG as i32 {
backlog = MAX_LISTEN_BACKLOG as i32;
}
let res = sock.Listen(task, backlog);
return res;
}
pub fn SysShutdown(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let how = args.arg1 as i32;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
let how = how as u64;
if !(how == LibcConst::SHUT_RD || how == LibcConst::SHUT_WR || how == LibcConst::SHUT_RDWR) {
return Err(Error::SysError(SysErr::EINVAL));
}
let res = sock.Shutdown(task, how as i32);
return res;
}
pub fn SysGetSockOpt(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let level = args.arg1 as i32;
let name = args.arg2 as i32;
let optValAddr = args.arg3 as u64;
let optLenAddr = args.arg4 as u64;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
let optlen = if optLenAddr != 0 {
let optlen = task.CopyInObj::<i32>(optLenAddr)?;
if optlen < 0 {
return Err(Error::SysError(SysErr::EINVAL));
}
optlen
} else {
0
};
let mut optVal: [u8; MAX_OPT_LEN as usize] = [0; MAX_OPT_LEN as usize];
let res = sock.GetSockOpt(task, level, name, &mut optVal[..optlen as usize])?;
if res < 0 {
panic!("GetSockOpt: get negative optlen")
}
let len = res as usize;
task.CopyOutSlice(&optVal[..len], optValAddr, len)?;
//*task.GetTypeMut(optLenAddr)? = len as i32;
task.CopyOutObj(&(len as i32), optLenAddr)?;
return Ok(0);
}
pub fn SysSetSockOpt(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let level = args.arg1 as i32;
let name = args.arg2 as i32;
let optValAddr = args.arg3 as u64;
let optLen = args.arg4 as i32;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
// Linux allows optlen = 0, which is equivalent to optval = 0,
// see `do_ip_setsockopt` in linux/source/net/ipv4/ip_sockglue.c
if optLen < 0 || optLen > MAX_OPT_LEN as i32 {
return Err(Error::SysError(SysErr::EINVAL));
}
let optVal = task.CopyInVec(optValAddr, optLen as usize)?;
let res = sock.SetSockOpt(task, level, name, &optVal[..optLen as usize])?;
return Ok(res);
}
pub fn SysGetSockName(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let addr = args.arg1 as u64;
let addrlen = args.arg2 as u64;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
let mut buf: [u8; MAX_ADDR_LEN as usize] = [0; MAX_ADDR_LEN as usize];
let len = task.CopyInObj::<i32>(addrlen)?;
let len = if len > MAX_ADDR_LEN as i32 {
MAX_ADDR_LEN as i32
} else {
len
};
let mut outputlen = sock.GetSockName(task, &mut buf[0..len as usize])? as usize;
//*(task.GetTypeMut::<i32>(addrlen)?) = outputlen as i32;
task.CopyOutObj(&(outputlen as i32), addrlen)?;
if len < outputlen as i32 {
outputlen = len as usize;
}
task.CopyOutSlice(&buf[..outputlen as usize], addr, outputlen as usize)?;
return Ok(0);
}
pub fn SysGetPeerName(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let addr = args.arg1 as u64;
let addrlen = args.arg2 as u64;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
let mut buf: [u8; MAX_ADDR_LEN as usize] = [0; MAX_ADDR_LEN as usize];
let mut outputlen = sock.GetPeerName(task, &mut buf)? as usize;
//info!("SysGetPeerName buf is {}", &buf[..outputlen as usize]);
let len = task.CopyInObj::<i32>(addrlen)?;
if len < outputlen as i32 {
outputlen = len as usize;
}
    task.CopyOutSlice(&buf[..outputlen as usize], addr, outputlen as usize)?;
//*(task.GetTypeMut::<i32>(addrlen)?) = outputlen as i32;
task.CopyOutObj(&(outputlen as i32), addrlen)?;
return Ok(0);
}
fn recvSingleMsg(
task: &Task,
sock: &Arc<FileOperations>,
msgPtr: u64,
flags: i32,
deadline: Option<Time>,
) -> Result<i64> {
//let msg = task.GetTypeMut::<MsgHdr>(msgPtr)?;
let mut msg: MsgHdr = task.CopyInObj(msgPtr)?;
if msg.iovLen > UIO_MAXIOV {
return Err(Error::SysError(SysErr::EMSGSIZE));
}
if msg.msgControl == 0 {
msg.msgControlLen = 0;
}
if msg.msgName == 0 {
msg.nameLen = 0;
}
let mut dst = task.IovsFromAddr(msg.iov, msg.iovLen)?;
if flags & MsgType::MSG_ERRQUEUE != 0 {
// Pretend we have an empty error queue.
return Err(Error::SysError(SysErr::EAGAIN));
}
// Fast path when no control message nor name buffers are provided.
if msg.msgControlLen == 0 && msg.nameLen == 0 {
let (n, mut mflags, _, controlMessageBuffer) =
sock.RecvMsg(task, &mut dst, flags, deadline, false, 0)?;
if controlMessageBuffer.len() != 0 {
mflags |= MsgType::MSG_CTRUNC;
}
msg.msgFlags = mflags;
return Ok(n as i64);
}
if msg.msgControlLen > MAX_CONTROL_LEN {
return Err(Error::SysError(SysErr::ENOBUFS));
}
let mut addressVec: Vec<u8> = vec![0; msg.nameLen as usize];
//let mut controlVec: Vec<u8> = vec![0; msg.msgControlLen as usize];
let (n, mflags, sender, controlMessageBuffer) = sock.RecvMsg(
task,
&mut dst,
flags,
deadline,
msg.nameLen != 0,
msg.msgControlLen,
)?;
/*
let controlData = &mut controlVec[..];
//todo: handle Timestamp ControlMessage
let mut credType : [u8; 4] = [0; 4];
let controlData = if let Ok(_) = sock.GetSockOpt(task, SOL_SOCKET, LibcConst::SO_PASSCRED as i32, &mut credType) {
if credType[0] != 0 {
match cms.Credentials {
// Edge case: user set SO_PASSCRED but the sender didn't set it in control massage
None => {
let (data, flags) = ControlMessageCredentials::Empty().EncodeInto(controlData, mflags);
mflags = flags;
data
}
Some(ref creds) => {
let (data, flags) = creds.Credentials().EncodeInto(controlData, mflags);
mflags = flags;
data
},
}
} else {
controlData
}
} else {
controlData
};
let controlData = match cms.Rights {
None => controlData,
Some(ref mut rights) => {
let maxFDs = (controlData.len() as isize - SIZE_OF_CONTROL_MESSAGE_HEADER as isize) / 4;
if maxFDs < 0 {
mflags |= MsgType::MSG_CTRUNC;
controlData
} else {
let (fds, trunc) = rights.RightsFDs(task, flags & MsgType::MSG_CMSG_CLOEXEC != 0, maxFDs as usize);
if trunc {
mflags |= MsgType::MSG_CTRUNC;
}
let (data, _) = ControlMessageRights(fds).EncodeInto(controlData, mflags);
data
}
},
};
msg.msgControlLen = msg.msgControlLen - controlData.len();
*/
msg.msgControlLen = controlMessageBuffer.len();
if msg.nameLen != 0 && msg.msgName != 0 && sender.is_some() {
let (sender, senderLen) = sender.unwrap();
if msg.nameLen < senderLen as u32 {
return Err(Error::SysError(SysErr::ERANGE));
}
sender.Marsh(&mut addressVec[..], senderLen)?;
task.CopyOutSlice(&addressVec[0..senderLen], msg.msgName, msg.nameLen as usize)?;
msg.nameLen = senderLen as u32;
}
if msg.msgControl != 0 && msg.msgControlLen != 0 {
task.CopyOutSlice(
&controlMessageBuffer[0..msg.msgControlLen as usize],
msg.msgControl,
msg.msgControlLen,
)?;
} else {
msg.msgControlLen = 0;
}
msg.msgFlags = mflags;
task.CopyOutObj(&msg, msgPtr)?;
return Ok(n);
}
fn sendSingleMsg(
task: &Task,
sock: &Arc<FileOperations>,
msgPtr: u64,
flags: i32,
deadline: Option<Time>,
) -> Result<i64> {
let msg = task.CopyInObj::<MsgHdr>(msgPtr)?;
if msg.msgControlLen > MAX_CONTROL_LEN as usize {
return Err(Error::SysError(SysErr::ENOBUFS));
}
if msg.iovLen > UIO_MAXIOV {
return Err(Error::SysError(SysErr::EMSGSIZE));
}
let msgVec: Vec<u8> = task.CopyInVec(msg.msgName, msg.nameLen as usize)?;
let controlVec: Vec<u8> = task.CopyInVec(msg.msgControl, msg.msgControlLen as usize)?;
let mut pMsg = msg;
if msg.nameLen > 0 {
pMsg.msgName = &msgVec[0] as *const _ as u64;
}
if msg.msgControlLen > 0 {
pMsg.msgControl = &controlVec[0] as *const _ as u64;
}
let src = task.IovsFromAddr(msg.iov, msg.iovLen)?;
let res = sock.SendMsg(task, &src, flags, &mut pMsg, deadline)?;
task.CopyOutObj(&msg, msgPtr)?;
return Ok(res);
}
pub fn SysRecvMsg(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let msgPtr = args.arg1 as u64;
let mut flags = args.arg2 as i32;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
if flags
& !(MsgType::BASE_RECV_FLAGS
| MsgType::MSG_PEEK
| MsgType::MSG_CMSG_CLOEXEC
| MsgType::MSG_ERRQUEUE)
!= 0
{
return Err(Error::SysError(SysErr::EINVAL));
}
if !file.Blocking() {
flags |= MsgType::MSG_DONTWAIT
}
let mut deadline = None;
let dl = file.FileOp.RecvTimeout();
if dl > 0 {
let now = MonotonicNow();
deadline = Some(Time(now + dl));
} else if dl < 0 {
flags |= MsgType::MSG_DONTWAIT
}
let res = recvSingleMsg(task, &sock, msgPtr, flags, deadline)?;
return Ok(res);
}
pub fn SysRecvMMsg(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let msgPtr = args.arg1 as u64;
let vlen = args.arg2 as i32;
let mut flags = args.arg3 as i32;
let timeout = args.arg4 as u64;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
if flags & !(MsgType::BASE_RECV_FLAGS | MsgType::MSG_CMSG_CLOEXEC | MsgType::MSG_ERRQUEUE) != 0
{
return Err(Error::SysError(SysErr::EINVAL));
}
if vlen < 0 {
return Err(Error::SysError(SysErr::EINVAL))
}
let mut vlen = vlen as u32;
if vlen > UIO_MAXIOV as u32 {
vlen = UIO_MAXIOV as u32;
}
let mut deadline = None;
if timeout != 0 {
let timePtr = task.CopyInObj::<Timespec>(timeout)?;
let now = MonotonicNow();
deadline = Some(Time(now + timePtr.ToNs()?));
}
if deadline.is_none() {
let dl = file.FileOp.RecvTimeout();
if dl > 0 {
let now = MonotonicNow();
deadline = Some(Time(now + dl));
} else if dl < 0 {
flags |= MsgType::MSG_DONTWAIT;
}
}
let mut count = 0;
let mut res = 0;
//let msgs = task.GetSliceMut::<MMsgHdr>(msgPtr, vlen as usize)?;
let mut msgs = task.CopyInVec::<MMsgHdr>(msgPtr, vlen as usize)?;
info!("SysRecvMMsg 1 vlen is {}", vlen);
for i in 0..vlen as usize {
res = match recvSingleMsg(
task,
&sock,
&(msgs[i].msgHdr) as *const MsgHdr as u64,
flags,
deadline,
) {
Err(e) => {
if count > 0 {
return Ok(count);
}
return Err(e);
}
Ok(n) => n,
};
if res < 0 {
break;
}
msgs[i].msgLen = res as u32;
count += 1;
}
if count == 0 {
return Err(Error::SysError(-res as i32));
}
task.CopyOutSlice(&msgs, msgPtr, vlen as usize)?;
return Ok(count);
}
pub const BASE_RECV_FLAGS: i32 = MsgType::MSG_OOB
| MsgType::MSG_DONTROUTE
| MsgType::MSG_DONTWAIT
| MsgType::MSG_NOSIGNAL
| MsgType::MSG_WAITALL
| MsgType::MSG_TRUNC
| MsgType::MSG_CTRUNC;
pub fn SysRecvFrom(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let bufPtr = args.arg1 as u64;
let buflen = args.arg2 as i64;
let flags = args.arg3 as i32;
let namePtr = args.arg4 as u64;
let nameLenPtr = args.arg5 as u64;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
if buflen < 0 {
        return Err(Error::SysError(SysErr::EINVAL));
}
if flags & !(BASE_RECV_FLAGS | MsgType::MSG_PEEK | MsgType::MSG_CONFIRM) != 0 {
return Err(Error::SysError(SysErr::EINVAL));
}
let mut flags = flags;
if !file.Blocking() {
flags |= MsgType::MSG_DONTWAIT
}
let iov = IoVec::NewFromAddr(bufPtr, buflen as usize);
let mut iovs: [IoVec; 1] = [iov];
let mut pMsg = MsgHdr::default();
let mut nameLen: i32 = 0;
if nameLenPtr != 0 {
nameLen = task.CopyInObj(nameLenPtr)?;
}
//todo: handle the msg.nameLen > 1024
    let _msgVec = if namePtr != 0 && nameLen > 0 {
let msgVec: Vec<u8> = vec![0; nameLen as usize];
pMsg.msgName = &msgVec[0] as *const _ as u64;
msgVec
} else {
Vec::new()
};
let mut deadline = None;
let dl = file.FileOp.RecvTimeout();
if dl > 0 {
let now = MonotonicNow();
deadline = Some(Time(now + dl));
} else if dl < 0 {
flags |= MsgType::MSG_DONTWAIT;
}
let (bytes, _, sender, _) =
sock.RecvMsg(task, &mut iovs, flags, deadline, nameLenPtr != 0, 0)?;
if nameLenPtr != 0 && sender.is_some() {
let (sender, senderLen) = sender.unwrap();
if senderLen != 2 {
if nameLen < senderLen as i32 {
return Err(Error::SysError(SysErr::ERANGE));
}
//let slices = task.GetSliceMut::<u8>(namePtr, nameLen as usize)?;
//sender.Marsh(slices, senderLen)?;
let mut dataBuf = DataBuff::New(nameLen as usize);
sender.Marsh(&mut dataBuf.buf, senderLen)?;
task.CopyOutSlice(&mut dataBuf.buf, namePtr, nameLen as usize)?;
//task.CopyOutSlice(&msgVec[0..pMsg.nameLen as usize], namePtr, nameLen as usize)?;
task.CopyOutObj(&(senderLen as u32), nameLenPtr)?;
} else {
// has only type
task.CopyOutObj(&(0 as u32), nameLenPtr)?;
}
}
return Ok(bytes as i64);
}
pub fn SysSendMsg(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let msgPtr = args.arg1 as u64;
let mut flags = args.arg2 as i32;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
if flags
& !(MsgType::MSG_DONTWAIT | MsgType::MSG_EOR | MsgType::MSG_MORE | MsgType::MSG_NOSIGNAL)
!= 0
{
return Err(Error::SysError(SysErr::EINVAL));
}
if !file.Blocking() {
flags |= MsgType::MSG_DONTWAIT;
}
let mut deadline = None;
let dl = sock.SendTimeout();
if dl > 0 {
let now = MonotonicNow();
deadline = Some(Time(now + dl));
} else if dl < 0 {
flags |= MsgType::MSG_DONTWAIT
}
let res = sendSingleMsg(task, &sock, msgPtr, flags, deadline)?;
return Ok(res);
}
pub fn SysSendMMsg(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let msgPtr = args.arg1 as u64;
let vlen = args.arg2 as i32;
let mut flags = args.arg3 as i32;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
if flags
& !(MsgType::MSG_DONTWAIT | MsgType::MSG_EOR | MsgType::MSG_MORE | MsgType::MSG_NOSIGNAL)
!= 0
{
return Err(Error::SysError(SysErr::EINVAL));
}
if vlen < 0 {
return Err(Error::SysError(SysErr::EINVAL))
}
let mut vlen = vlen as u32;
if vlen > UIO_MAXIOV as u32 {
vlen = UIO_MAXIOV as u32;
}
if !file.Blocking() {
flags |= MsgType::MSG_DONTWAIT;
}
let mut deadline = None;
let dl = sock.SendTimeout();
if dl > 0 {
let now = MonotonicNow();
deadline = Some(Time(now + dl));
} else if dl < 0 {
flags |= MsgType::MSG_DONTWAIT
}
let mut count = 0;
let mut res = 0;
//let msgs = task.GetSliceMut::<MMsgHdr>(msgPtr, vlen as usize)?;
let mut msgs = task.CopyInVec::<MMsgHdr>(msgPtr, vlen as usize)?;
for i in 0..vlen as usize {
res = sendSingleMsg(
task,
&sock,
&(msgs[i].msgHdr) as *const MsgHdr as u64,
flags,
deadline,
)?;
if res < 0 {
break;
}
msgs[i].msgLen = res as u32;
count += 1;
}
if count == 0 {
return Err(Error::SysError(-res as i32));
}
task.CopyOutSlice(&msgs, msgPtr, vlen as usize)?;
return Ok(count);
}
pub fn SysSendTo(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
let fd = args.arg0 as i32;
let bufPtr = args.arg1 as u64;
let buflen = args.arg2 as i64;
let mut flags = args.arg3 as i32;
let namePtr = args.arg4 as u64;
let nameLen = args.arg5 as u32;
let file = task.GetFile(fd)?;
let sock = file.FileOp.clone();
if buflen < 0 {
        return Err(Error::SysError(SysErr::EINVAL));
}
task.CheckPermission(bufPtr, buflen as u64, false, false)?;
let iov = IoVec::NewFromAddr(bufPtr, buflen as usize);
let iovs: [IoVec; 1] = [iov];
let mut pMsg = MsgHdr::default();
let _msgVec = if namePtr != 0 && nameLen > 0 {
//let vec = task.GetSlice::<u8>(namePtr, nameLen as usize)?.to_vec();
let vec = task.CopyInVec::<u8>(namePtr, nameLen as usize)?;
pMsg.msgName = vec.as_ptr() as u64;
pMsg.nameLen = nameLen;
Some(vec)
} else {
pMsg.msgName = 0;
pMsg.nameLen = 0;
None
};
let mut deadline = None;
let dl = sock.SendTimeout();
if dl > 0 {
let now = MonotonicNow();
deadline = Some(Time(now + dl));
} else if dl < 0 {
flags |= MsgType::MSG_DONTWAIT
}
if !file.Blocking() {
flags |= MsgType::MSG_DONTWAIT;
}
let res = sock.SendMsg(task, &iovs, flags, &mut pMsg, deadline)?;
return Ok(res);
}
| 27.86262 | 118 | 0.590758 |
5d693e0341a74559b12f544843d83c809aeab53e | 55,127 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Tools for providing Fuchsia services.
use {
byteorder::{LittleEndian, WriteBytesExt as _},
failure::{bail, Error, Fail, ResultExt},
fidl::{
encoding::OutOfLine,
endpoints::{DiscoverableService, Proxy as _, RequestStream, ServerEnd},
},
fidl_fuchsia_io::{
DirectoryObject, DirectoryProxy, DirectoryRequest, DirectoryRequestStream, FileRequest,
FileRequestStream, NodeAttributes, NodeInfo, NodeMarker, NodeRequest, NodeRequestStream,
SeekOrigin, OPEN_FLAG_DESCRIBE, OPEN_FLAG_DIRECTORY, OPEN_FLAG_NODE_REFERENCE,
OPEN_FLAG_NOT_DIRECTORY, OPEN_FLAG_POSIX, OPEN_RIGHT_READABLE, OPEN_RIGHT_WRITABLE,
},
fidl_fuchsia_sys::{
EnvironmentControllerProxy, EnvironmentMarker, EnvironmentOptions, LauncherProxy,
LoaderMarker, ServiceList,
},
fuchsia_async as fasync,
fuchsia_zircon::{self as zx, HandleBased as _, Peered, Signals},
futures::{
stream::{FuturesUnordered, StreamExt},
task::Context,
Poll, Stream,
},
std::{
cmp::min,
collections::hash_map::{Entry, HashMap},
io::Write,
marker::{PhantomData, Unpin},
pin::Pin,
sync::Arc,
},
};
mod service;
pub use service::{FidlService, Service, ServiceObj, ServiceObjLocal, ServiceObjTrait};
mod stream_helpers;
use stream_helpers::NextWith;
enum Directory {
Local { children: HashMap<String, usize> },
Remote(DirectoryProxy),
}
enum ServiceFsNode<ServiceObjTy: ServiceObjTrait> {
Directory(Directory),
Service(ServiceObjTy),
VmoFile { vmo: zx::Vmo, offset: u64, length: u64 },
}
const NOT_A_DIR: &str = "ServiceFs expected directory";
impl<ServiceObjTy: ServiceObjTrait> ServiceFsNode<ServiceObjTy> {
fn is_service(&self) -> bool {
if let ServiceFsNode::Service(_) = self {
true
} else {
false
}
}
fn expect_dir(&self) -> &HashMap<String, usize> {
if let ServiceFsNode::Directory(Directory::Local { children }) = self {
children
} else {
panic!(NOT_A_DIR)
}
}
fn expect_dir_mut(&mut self) -> &mut HashMap<String, usize> {
if let ServiceFsNode::Directory(Directory::Local { children }) = self {
children
} else {
panic!(NOT_A_DIR)
}
}
fn to_dirent_type(&self) -> u8 {
match self {
ServiceFsNode::Directory(_) => fidl_fuchsia_io::DIRENT_TYPE_DIRECTORY,
ServiceFsNode::Service(_) => fidl_fuchsia_io::DIRENT_TYPE_SERVICE,
ServiceFsNode::VmoFile { .. } => fidl_fuchsia_io::DIRENT_TYPE_FILE,
}
}
}
/// A filesystem which connects clients to services.
///
/// This type implements the `Stream` trait and will yield the values
/// returned from calling `Service::connect` on the services it hosts.
///
/// This can be used to, for example, yield streams of channels, request
/// streams, futures to run, or any other value that should be processed
/// as the result of a request.
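///
/// # Example
///
/// A minimal sketch of hosting one FIDL service; `EchoRequestStream` stands
/// in for any generated request stream type, and error handling is elided:
///
/// ```no_run
/// let mut fs = ServiceFs::new_local();
/// fs.add_fidl_service(|stream: EchoRequestStream| stream);
/// fs.take_and_serve_directory_handle()?;
/// // Each item yielded by `fs` is whatever the closure above returned.
/// fs.for_each_concurrent(None, |_stream| async { /* handle requests */ }).await;
/// ```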
#[must_use]
pub struct ServiceFs<ServiceObjTy: ServiceObjTrait> {
/// Open connections to `ServiceFs` directories.
dir_connections: FuturesUnordered<DirConnection>,
/// Open connections to `ServiceFs` files.
file_connections: FuturesUnordered<FileConnection>,
    /// Open connections to `ServiceFs` nodes (directories, files, or services).
node_connections: FuturesUnordered<NodeConnection>,
/// The tree of `ServiceFsNode`s.
/// The root is always a directory at index 0.
///
//
// FIXME(cramertj) move to a generational index and support
// removal of nodes.
nodes: Vec<ServiceFsNode<ServiceObjTy>>,
}
const ROOT_NODE: usize = 0;
const NO_FLAGS: u32 = 0;
const CLONE_REQ_SUPPORTED_FLAGS: u32 =
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE | OPEN_FLAG_DESCRIBE;
const OPEN_REQ_SUPPORTED_FLAGS: u32 = OPEN_RIGHT_READABLE
| OPEN_RIGHT_WRITABLE
| OPEN_FLAG_DESCRIBE
| OPEN_FLAG_POSIX
| OPEN_FLAG_DIRECTORY
| OPEN_FLAG_NODE_REFERENCE;
impl<'a, Output: 'a> ServiceFs<ServiceObjLocal<'a, Output>> {
/// Create a new `ServiceFs` that is singlethreaded-only and does not
/// require services to implement `Send`.
pub fn new_local() -> Self {
Self {
dir_connections: FuturesUnordered::new(),
file_connections: FuturesUnordered::new(),
node_connections: FuturesUnordered::new(),
nodes: vec![ServiceFsNode::Directory(Directory::Local { children: HashMap::new() })],
}
}
}
impl<'a, Output: 'a> ServiceFs<ServiceObj<'a, Output>> {
/// Create a new `ServiceFs` that is multithreaded-capable and requires
/// services to implement `Send`.
pub fn new() -> Self {
Self {
dir_connections: FuturesUnordered::new(),
file_connections: FuturesUnordered::new(),
node_connections: FuturesUnordered::new(),
nodes: vec![ServiceFsNode::Directory(Directory::Local { children: HashMap::new() })],
}
}
}
/// A directory within a `ServiceFs`.
///
/// Services and subdirectories can be added to it.
pub struct ServiceFsDir<'a, ServiceObjTy: ServiceObjTrait> {
position: usize,
fs: &'a mut ServiceFs<ServiceObjTy>,
}
/// Finds the position at which a new directory should be added.
fn find_dir_insert_position<ServiceObjTy: ServiceObjTrait>(
fs: &mut ServiceFs<ServiceObjTy>,
position: usize,
path: String,
) -> usize {
let new_node_position = fs.nodes.len();
let self_dir = fs.nodes[position].expect_dir_mut();
let &mut position = self_dir.entry(path.clone()).or_insert(new_node_position);
if position != new_node_position {
if let ServiceFsNode::Service(_) = &fs.nodes[position] {
panic!("Error adding dir to ServiceFs: existing service at \"{}\"", path)
}
}
position
}
fn dir<'a, ServiceObjTy: ServiceObjTrait>(
fs: &'a mut ServiceFs<ServiceObjTy>,
position: usize,
path: String,
) -> ServiceFsDir<'a, ServiceObjTy> {
let new_node_position = fs.nodes.len();
let position = find_dir_insert_position(fs, position, path);
if position == new_node_position {
fs.nodes.push(ServiceFsNode::Directory(Directory::Local { children: HashMap::new() }));
}
ServiceFsDir { position, fs }
}
fn remote<'a, ServiceObjTy: ServiceObjTrait>(
fs: &'a mut ServiceFs<ServiceObjTy>,
position: usize,
name: String,
proxy: DirectoryProxy,
) {
let new_node_position = fs.nodes.len();
let position = find_dir_insert_position(fs, position, name);
if position == new_node_position {
fs.nodes.push(ServiceFsNode::Directory(Directory::Remote(proxy)));
}
}
fn add_entry<ServiceObjTy: ServiceObjTrait>(
fs: &mut ServiceFs<ServiceObjTy>,
position: usize,
path: String,
entry: ServiceFsNode<ServiceObjTy>,
) {
let new_node_position = fs.nodes.len();
let self_dir = fs.nodes[position].expect_dir_mut();
let map_entry = self_dir.entry(path);
match map_entry {
Entry::Occupied(prev) => {
panic!("Duplicate ServiceFs entry added at path \"{}\"", prev.key())
}
Entry::Vacant(slot) => {
slot.insert(new_node_position);
fs.nodes.push(entry);
}
}
}
/// A `Service` implementation that proxies requests
/// to the outside environment.
///
/// Not intended for direct use. Use the `add_proxy_service`
/// function instead.
#[doc(hidden)]
pub struct Proxy<S, O>(PhantomData<(S, fn() -> O)>);
impl<S: DiscoverableService, O> Service for Proxy<S, O> {
type Output = O;
fn connect(&mut self, channel: zx::Channel) -> Option<O> {
if let Err(e) = crate::client::connect_channel_to_service::<S>(channel) {
eprintln!("failed to proxy request to {}: {:?}", S::SERVICE_NAME, e);
}
None
}
}
/// A `Service` implementation that proxies requests to the given component.
///
/// Not intended for direct use. Use the `add_proxy_service_to` function instead.
#[doc(hidden)]
pub struct ProxyTo<S, O> {
directory_request: Arc<zx::Channel>,
_phantom: PhantomData<(S, fn() -> O)>,
}
impl<S: DiscoverableService, O> Service for ProxyTo<S, O> {
type Output = O;
fn connect(&mut self, channel: zx::Channel) -> Option<O> {
if let Err(e) = fdio::service_connect_at(&self.directory_request, S::SERVICE_NAME, channel) {
eprintln!("failed to proxy request to {}: {:?}", S::SERVICE_NAME, e);
}
None
}
}
struct LaunchData {
component_url: String,
arguments: Option<Vec<String>>,
}
/// A `Service` implementation that proxies requests
/// to a launched component.
///
/// Not intended for direct use. Use the `add_component_proxy_service`
/// function instead.
#[doc(hidden)]
pub struct ComponentProxy<O> {
launch_data: Option<LaunchData>,
launched_app: Option<crate::client::App>,
service_name: &'static str,
_marker: PhantomData<O>,
}
impl<O> Service for ComponentProxy<O> {
type Output = O;
fn connect(&mut self, channel: zx::Channel) -> Option<O> {
let res = (|| {
if let Some(LaunchData { component_url, arguments }) = self.launch_data.take() {
self.launched_app = Some(crate::client::launch(
&crate::client::launcher()?,
component_url,
arguments,
)?);
}
if let Some(app) = self.launched_app.as_ref() {
app.pass_to_named_service(self.service_name, channel.into())?;
}
Ok::<(), Error>(())
})();
if let Err(e) = res {
eprintln!("ServiceFs failed to launch component: {:?}", e);
}
None
}
}
// Not part of a trait so that clients won't have to import a trait
// in order to call these functions.
macro_rules! add_functions {
() => {
/// Adds a service to the directory at the given path.
///
/// The path must be a single component containing no `/` characters.
///
/// Panics if any node has already been added at the given path.
pub fn add_service_at(
&mut self,
path: impl Into<String>,
service: impl Into<ServiceObjTy>,
) -> &mut Self {
self.add_entry_at(
path.into(),
ServiceFsNode::Service(service.into()),
)
}
/// Adds a FIDL service to the directory.
///
/// The FIDL service will be hosted at the name provided by the
/// `[Discoverable]` annotation in the FIDL source.
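        ///
        /// A sketch, assuming a generated `EchoRequestStream` type is in scope:
        ///
        /// ```no_run
        /// fs.add_fidl_service(|stream: EchoRequestStream| stream);
        /// ```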
pub fn add_fidl_service<F, RS>(
&mut self,
service: F,
) -> &mut Self
where
F: FnMut(RS) -> ServiceObjTy::Output,
RS: RequestStream,
RS::Service: DiscoverableService,
FidlService<F, RS, ServiceObjTy::Output>: Into<ServiceObjTy>,
{
self.add_fidl_service_at(
RS::Service::SERVICE_NAME,
service,
)
}
/// Adds a FIDL service to the directory at the given path.
///
/// The path must be a single component containing no `/` characters.
pub fn add_fidl_service_at<F, RS>(
&mut self,
path: impl Into<String>,
service: F,
) -> &mut Self
where
F: FnMut(RS) -> ServiceObjTy::Output,
RS: RequestStream,
RS::Service: DiscoverableService,
FidlService<F, RS, ServiceObjTy::Output>: Into<ServiceObjTy>,
{
self.add_service_at(
path,
FidlService::from(service),
)
}
/// Adds a service that proxies requests to the current environment.
// NOTE: we'd like to be able to remove the type parameter `O` here,
// but unfortunately the bound `ServiceObjTy: From<Proxy<S, ServiceObjTy::Output>>`
// makes type checking angry.
pub fn add_proxy_service<S: DiscoverableService, O>(&mut self) -> &mut Self
where
ServiceObjTy: From<Proxy<S, O>>,
ServiceObjTy: ServiceObjTrait<Output = O>,
{
self.add_service_at(
S::SERVICE_NAME,
Proxy::<S, ServiceObjTy::Output>(PhantomData),
)
}
/// Adds a service that proxies requests to the given component.
// NOTE: we'd like to be able to remove the type parameter `O` here,
// but unfortunately the bound `ServiceObjTy: From<Proxy<S, ServiceObjTy::Output>>`
// makes type checking angry.
pub fn add_proxy_service_to<S: DiscoverableService, O>(&mut self, directory_request: Arc<zx::Channel>) -> &mut Self
where
ServiceObjTy: From<ProxyTo<S, O>>,
ServiceObjTy: ServiceObjTrait<Output = O>,
{
self.add_service_at(
S::SERVICE_NAME,
ProxyTo::<S, ServiceObjTy::Output>{
directory_request, _phantom: PhantomData}
)
}
/// Add a service to the `ServicesServer` that will launch a component
/// upon request, proxying requests to the launched component.
pub fn add_component_proxy_service<O>(
&mut self,
service_name: &'static str,
component_url: String,
arguments: Option<Vec<String>>,
) -> &mut Self
where
ServiceObjTy: From<ComponentProxy<O>>,
ServiceObjTy: ServiceObjTrait<Output = O>,
{
self.add_service_at(
service_name,
ComponentProxy {
launch_data: Some(LaunchData { component_url, arguments }),
launched_app: None,
service_name,
_marker: PhantomData,
}
)
}
/// Adds a VMO file to the directory at the given path.
///
/// The path must be a single component containing no `/` characters.
///
/// Panics if any node has already been added at the given path.
pub fn add_vmo_file_at(
&mut self,
path: impl Into<String>,
vmo: zx::Vmo,
offset: u64,
length: u64,
) -> &mut Self {
self.add_entry_at(
path.into(),
ServiceFsNode::VmoFile { vmo, offset, length },
)
}
};
}
impl<'a, ServiceObjTy: ServiceObjTrait> ServiceFsDir<'a, ServiceObjTy> {
/// Returns a reference to the subdirectory at the given path,
/// creating one if none exists.
///
/// The path must be a single component containing no `/` characters.
///
/// Panics if a service has already been added at the given path.
pub fn dir<'b>(&'b mut self, path: impl Into<String>) -> ServiceFsDir<'b, ServiceObjTy> {
dir(self.fs, self.position, path.into())
}
/// Adds a new remote directory served over the given DirectoryProxy.
///
/// The name must not contain any '/' characters.
pub fn add_remote(&mut self, name: impl Into<String>, proxy: DirectoryProxy) {
remote(self.fs, self.position, name.into(), proxy)
}
fn add_entry_at(&mut self, path: String, entry: ServiceFsNode<ServiceObjTy>) -> &mut Self {
add_entry(self.fs, self.position, path, entry);
self
}
add_functions!();
}
impl<ServiceObjTy: ServiceObjTrait> ServiceFs<ServiceObjTy> {
/// Returns a reference to the subdirectory at the given path,
/// creating one if none exists.
///
/// The path must be a single component containing no `/` characters.
///
/// Panics if a service has already been added at the given path.
pub fn dir<'a>(&'a mut self, path: impl Into<String>) -> ServiceFsDir<'a, ServiceObjTy> {
dir(self, ROOT_NODE, path.into())
}
/// Adds a new remote directory served over the given DirectoryProxy.
///
/// The name must not contain any '/' characters.
pub fn add_remote(&mut self, name: impl Into<String>, proxy: DirectoryProxy) {
remote(self, ROOT_NODE, name.into(), proxy)
}
fn add_entry_at(&mut self, path: String, entry: ServiceFsNode<ServiceObjTy>) -> &mut Self {
add_entry(self, ROOT_NODE, path, entry);
self
}
add_functions!();
/// Start serving directory protocol service requests via a `ServiceList`.
/// The resulting `ServiceList` can be attached to a new environment in
/// order to provide child components with access to these services.
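    ///
    /// A sketch of the intended use; attaching the list to an environment is
    /// shown in `create_nested_environment` below:
    ///
    /// ```no_run
    /// let mut fs = ServiceFs::new();
    /// let service_list = fs.host_services_list()?;
    /// // Pass `service_list` when creating a new environment, then keep
    /// // polling `fs` so the hosted directory continues to serve requests.
    /// ```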
pub fn host_services_list(&mut self) -> Result<ServiceList, Error> {
let names = self.nodes[ROOT_NODE]
.expect_dir()
.iter()
.filter(|(_, v)| self.nodes[**v].is_service())
.map(|(k, _)| k)
.cloned()
.collect();
let (chan1, chan2) = zx::Channel::create()?;
self.serve_connection(chan1)?;
Ok(ServiceList { names, provider: None, host_directory: Some(chan2) })
}
/// Creates a new environment that only has access to the services provided through this
/// `ServiceFs` and the enclosing environment's `Loader` service, appending a few random
/// bytes to the given `environment_label_prefix` to ensure this environment has a unique
/// name.
///
/// Note that the resulting `NestedEnvironment` must be kept alive for the environment to
/// continue to exist. Once dropped, the environment and all components launched within it
/// will be destroyed.
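    ///
    /// A sketch; the component URL below is a placeholder:
    ///
    /// ```no_run
    /// let mut fs = ServiceFs::new();
    /// let env = fs.create_salted_nested_environment("my_test")?;
    /// let app = fuchsia_component::client::launch(
    ///     env.launcher(),
    ///     "fuchsia-pkg://fuchsia.com/example#meta/example.cmx".to_string(),
    ///     None,
    /// )?;
    /// ```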
pub fn create_salted_nested_environment<O>(
&mut self,
environment_label_prefix: &str,
) -> Result<NestedEnvironment, Error>
where
ServiceObjTy: From<Proxy<LoaderMarker, O>>,
ServiceObjTy: ServiceObjTrait<Output = O>,
{
let mut salt = [0; 4];
fuchsia_zircon::cprng_draw(&mut salt[..]).expect("zx_cprng_draw does not fail");
let environment_label = format!("{}_{}", environment_label_prefix, hex::encode(&salt));
self.create_nested_environment(&environment_label)
}
/// Creates a new environment that only has access to the services provided through this
/// `ServiceFs` and the enclosing environment's `Loader` service.
///
/// Note that the resulting `NestedEnvironment` must be kept alive for the environment to
/// continue to exist. Once dropped, the environment and all components launched within it
/// will be destroyed.
pub fn create_nested_environment<O>(
&mut self,
environment_label: &str,
) -> Result<NestedEnvironment, Error>
where
ServiceObjTy: From<Proxy<LoaderMarker, O>>,
ServiceObjTy: ServiceObjTrait<Output = O>,
{
let env = crate::client::connect_to_service::<EnvironmentMarker>()
.context("connecting to current environment")?;
let services_with_loader = self.add_proxy_service::<LoaderMarker, _>();
let mut service_list = services_with_loader.host_services_list()?;
let (new_env, new_env_server_end) = fidl::endpoints::create_proxy()?;
let (controller, controller_server_end) = fidl::endpoints::create_proxy()?;
let (launcher, launcher_server_end) = fidl::endpoints::create_proxy()?;
let (directory_request, directory_server_end) = zx::Channel::create()?;
env.create_nested_environment(
new_env_server_end,
controller_server_end,
environment_label,
Some(fidl::encoding::OutOfLine(&mut service_list)),
&mut EnvironmentOptions {
inherit_parent_services: false,
allow_parent_runners: false,
kill_on_oom: false,
delete_storage_on_death: false,
},
)
.context("creating isolated environment")?;
new_env.get_launcher(launcher_server_end).context("getting nested environment launcher")?;
self.serve_connection(directory_server_end)?;
Ok(NestedEnvironment { controller, launcher, directory_request })
}
/// Starts a new component inside an environment that only has access to
/// the services provided through this `ServicesServer`.
///
/// Note that the resulting `App` and `EnvironmentControllerProxy` must be kept
/// alive for the component to continue running. Once they are dropped, the
/// component will be destroyed.
pub fn launch_component_in_nested_environment<O>(
&mut self,
url: String,
arguments: Option<Vec<String>>,
environment_label: &str,
) -> Result<(EnvironmentControllerProxy, crate::client::App), Error>
where
ServiceObjTy: From<Proxy<LoaderMarker, O>>,
ServiceObjTy: ServiceObjTrait<Output = O>,
{
let (new_env_controller, app) = self.launch_component_in_nested_environment_with_options(
url,
arguments,
crate::client::LaunchOptions::new(),
environment_label,
)?;
Ok((new_env_controller, app))
}
/// Starts a new component inside an isolated environment with custom launch
/// options, see the comment for |launch_component_in_nested_environment()|
/// above.
pub fn launch_component_in_nested_environment_with_options<O>(
&mut self,
url: String,
arguments: Option<Vec<String>>,
options: crate::client::LaunchOptions,
environment_label: &str,
) -> Result<(EnvironmentControllerProxy, crate::client::App), Error>
where
ServiceObjTy: From<Proxy<LoaderMarker, O>>,
ServiceObjTy: ServiceObjTrait<Output = O>,
{
let NestedEnvironment { controller, launcher, directory_request: _ } =
self.create_nested_environment(environment_label)?;
let app = crate::client::launch_with_options(&launcher, url, arguments, options)?;
Ok((controller, app))
}
}
/// `NestedEnvironment` represents an environment nested within another.
///
/// When `NestedEnvironment` is dropped, the environment and all components started within it
/// will be terminated.
#[must_use = "Dropping `NestedEnvironment` will cause the environment to be terminated."]
pub struct NestedEnvironment {
controller: EnvironmentControllerProxy,
launcher: LauncherProxy,
directory_request: zx::Channel,
}
impl NestedEnvironment {
/// Returns a reference to the environment's controller.
#[inline]
pub fn controller(&self) -> &EnvironmentControllerProxy {
&self.controller
}
/// Returns a reference to the environment's launcher.
#[inline]
pub fn launcher(&self) -> &LauncherProxy {
&self.launcher
}
/// Connect to a service provided by this environment.
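    ///
    /// A sketch, with `EchoMarker` standing in for any discoverable service:
    ///
    /// ```no_run
    /// let echo = env.connect_to_service::<EchoMarker>()?;
    /// ```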
#[inline]
pub fn connect_to_service<S: DiscoverableService>(&self) -> Result<S::Proxy, Error> {
let (client_channel, server_channel) = zx::Channel::create()?;
self.pass_to_service::<S>(server_channel)?;
Ok(S::Proxy::from_channel(fasync::Channel::from_channel(client_channel)?))
}
/// Connect to a service by passing a channel for the server.
#[inline]
pub fn pass_to_service<S: DiscoverableService>(
&self,
server_channel: zx::Channel,
) -> Result<(), Error> {
self.pass_to_named_service(S::SERVICE_NAME, server_channel)
}
/// Connect to a service by name.
#[inline]
pub fn pass_to_named_service(
&self,
service_name: &str,
server_channel: zx::Channel,
) -> Result<(), Error> {
fdio::service_connect_at(&self.directory_request, service_name, server_channel)?;
Ok(())
}
}
enum ConnectionState {
Open,
Closed,
}
/// A client connection to a directory of `ServiceFs`.
type DirConnection = NextWith<DirectoryRequestStream, DirConnectionData>;
struct DirConnectionData {
/// The current node of the `DirConnection` in the `ServiceFs`
/// filesystem.
position: usize,
/// Buffer and position of current DirectoryRequest::ReadDirents
dirents_buf: Option<(Vec<u8>, usize)>,
}
/// A client connection to a file in `ServiceFs`.
type FileConnection = NextWith<FileRequestStream, FileConnectionData>;
struct FileConnectionData {
position: usize,
seek_offset: u64,
}
/// A client connection to any node in `ServiceFs`.
type NodeConnection = NextWith<NodeRequestStream, NodeConnectionData>;
struct NodeConnectionData {
position: usize,
}
/// An error indicating the startup handle on which the FIDL server
/// attempted to start was missing.
#[derive(Debug, Fail)]
#[fail(display = "The startup handle on which the FIDL server attempted to start was missing.")]
pub struct MissingStartupHandle;
fn send_failed_on_open(
object: ServerEnd<NodeMarker>,
status: zx::sys::zx_status_t,
) -> Result<(), Error> {
let (_stream, control_handle) = object
.into_stream_and_control_handle()
.context("fail to convert to stream and control handle")?;
control_handle.send_on_open_(status, None).context("fail sending OnOpenEvent")?;
Ok(())
}
fn maybe_send_error(
object: ServerEnd<NodeMarker>,
flags: u32,
error: zx::sys::zx_status_t,
) -> Result<(), Error> {
if (flags & OPEN_FLAG_DESCRIBE) != 0 {
send_failed_on_open(object, error)?;
}
Ok(())
}
fn handle_potentially_unsupported_flags(
object: ServerEnd<NodeMarker>,
flags: u32,
supported_flags_bitmask: u32,
) -> Result<ServerEnd<NodeMarker>, Error> {
let unsupported_flags = flags & !supported_flags_bitmask;
if unsupported_flags != 0 {
maybe_send_error(object, flags, zx::sys::ZX_ERR_NOT_SUPPORTED)?;
bail!("unsupported flags: {:b}", unsupported_flags);
} else {
Ok(object)
}
}
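// Replies to a request with ZX_ERR_NOT_SUPPORTED; any extra tokens are
// forwarded as additional responder arguments for replies that carry data.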
macro_rules! unsupported {
($responder:ident $($args:tt)*) => {
$responder.send(zx::sys::ZX_ERR_NOT_SUPPORTED $($args)*)
}
}
// Can't be a single function because DirectoryRequestStream,
// FileRequestStream, and NodeRequestStream don't have matching types, even though their
// function signatures are identical.
macro_rules! send_info_fn {
($(($name:ident, $stream:ty),)*) => { $(
fn $name(stream: &$stream, info: Option<NodeInfo>) -> Result<(), Error> {
if let Some(mut info) = info {
stream
.control_handle()
.send_on_open_(zx::sys::ZX_OK, Some(OutOfLine(&mut info)))
.context("fail sending OnOpen event")?;
}
Ok(())
}
)* }
}
#[rustfmt::skip]
send_info_fn![
(send_info_dir, DirectoryRequestStream),
(send_info_file, FileRequestStream),
(send_info_node, NodeRequestStream),
];
fn into_async(chan: zx::Channel) -> Result<fasync::Channel, Error> {
Ok(fasync::Channel::from_channel(chan).context("failed to convert to async channel")?)
}
#[derive(Debug)]
enum DescendResult<'a> {
LocalChildren(&'a HashMap<String, usize>),
RemoteDir((&'a DirectoryProxy, String)),
}
impl<ServiceObjTy: ServiceObjTrait> ServiceFs<ServiceObjTy> {
/// Removes the `DirectoryRequest` startup handle for the current
/// component and connects it to this `ServiceFs` as a client.
///
/// Multiple calls to this function from the same component will
/// result in `Err(MissingStartupHandle)`.
pub fn take_and_serve_directory_handle(&mut self) -> Result<&mut Self, Error> {
let startup_handle = fuchsia_runtime::take_startup_handle(
fuchsia_runtime::HandleType::DirectoryRequest.into(),
)
.ok_or(MissingStartupHandle)?;
self.serve_connection(zx::Channel::from(startup_handle))
}
/// Add an additional connection to the `ServiceFs` to provide services to.
pub fn serve_connection(&mut self, chan: zx::Channel) -> Result<&mut Self, Error> {
match self.serve_connection_at(chan.into(), ROOT_NODE, NO_FLAGS)? {
Some(_) => panic!("root directory connection should not return output"),
None => {}
}
Ok(self)
}
/// Serve a connection at a specific node.
fn serve_connection_at(
&mut self,
object: ServerEnd<NodeMarker>,
position: usize,
flags: u32,
) -> Result<Option<ServiceObjTy::Output>, Error> {
let node = &self.nodes[position];
// Forward requests for a remote directory to that directory.
match node {
ServiceFsNode::Directory(Directory::Remote(proxy)) => {
proxy.clone(flags, object)?;
return Ok(None);
}
_ => {}
}
// It is not an error if the other end of the channel is already
// closed: the client may call Directory::Open, send a channel chan,
// then write a request on their local end of chan. If the request
// does not expect a response (e.g. Directory::Open of a subdirectory),
// the client may close chan immediately afterwards. We should keep
// our end of the channel until we have processed all incoming requests.
object
.channel()
.signal_peer(Signals::NONE, Signals::USER_0)
.or_else(|e| match e {
zx::Status::PEER_CLOSED => Ok(()),
e => Err(e),
})
.context("ServiceFs signal_peer failed")?;
let info = if (flags & OPEN_FLAG_DESCRIBE) != 0 {
Some(self.describe_node(position)?)
} else {
None
};
let is_directory = if let ServiceFsNode::Directory { .. } = node { true } else { false };
if (flags & OPEN_FLAG_DIRECTORY != 0) && !is_directory {
send_failed_on_open(object, zx::sys::ZX_ERR_NOT_DIR)?;
return Ok(None);
}
if (flags & OPEN_FLAG_NOT_DIRECTORY != 0) && is_directory {
send_failed_on_open(object, zx::sys::ZX_ERR_NOT_FILE)?;
return Ok(None);
}
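// A node-reference connection speaks only the Node protocol, regardless
// of the concrete type of the underlying node.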
if flags & OPEN_FLAG_NODE_REFERENCE != 0 {
let chan = into_async(object.into_channel())?;
let stream = NodeRequestStream::from_channel(chan);
send_info_node(&stream, info)?;
self.node_connections
.push(NodeConnection::new(stream, NodeConnectionData { position }));
return Ok(None);
}
let chan = object.into_channel();
match &mut self.nodes[position] {
ServiceFsNode::Directory { .. } => {
let chan = into_async(chan)?;
let stream = DirectoryRequestStream::from_channel(chan);
send_info_dir(&stream, info)?;
self.dir_connections.push(DirConnection::new(
stream,
DirConnectionData { position, dirents_buf: None },
));
Ok(None)
}
ServiceFsNode::VmoFile { .. } => {
let chan = into_async(chan)?;
let stream = FileRequestStream::from_channel(chan);
send_info_file(&stream, info)?;
self.file_connections.push(FileConnection::new(
stream,
FileConnectionData { position, seek_offset: 0 },
));
Ok(None)
}
ServiceFsNode::Service(service) => Ok(service.service().connect(chan)),
}
}
fn handle_clone(
&mut self,
flags: u32,
object: ServerEnd<NodeMarker>,
position: usize,
) -> Option<ServiceObjTy::Output> {
match (|| {
let object =
handle_potentially_unsupported_flags(object, flags, CLONE_REQ_SUPPORTED_FLAGS)?;
self.serve_connection_at(object, position, flags)
})() {
Ok(output) => output,
Err(e) => {
eprintln!("ServiceFs failed to clone: {:?}", e);
None
}
}
}
fn handle_dir_request(
&mut self,
request: DirectoryRequest,
connection: &mut DirConnectionData,
) -> Result<(Option<ServiceObjTy::Output>, ConnectionState), Error> {
assert!(self.nodes.len() > connection.position);
match request {
DirectoryRequest::Clone { flags, object, control_handle: _ } => {
match self.handle_clone(flags, object, connection.position) {
Some(_) => panic!("cloning directory connection should not return output"),
None => {}
}
}
DirectoryRequest::Close { responder } => {
responder.send(zx::sys::ZX_OK)?;
return Ok((None, ConnectionState::Closed));
}
DirectoryRequest::Open { flags, mode, path, object, control_handle: _ } => {
let object =
handle_potentially_unsupported_flags(object, flags, OPEN_REQ_SUPPORTED_FLAGS)?;
if path == "." {
match self.serve_connection_at(object, connection.position, flags) {
Ok(Some(_)) => panic!("serving directory '.' should not return output"),
Ok(None) => {}
Err(e) => eprintln!("ServiceFs failed to serve '.': {:?}", e),
}
return Ok((None, ConnectionState::Open));
} else if path == "" {
maybe_send_error(object, flags, zx::sys::ZX_ERR_BAD_PATH)?;
return Ok((None, ConnectionState::Open));
}
let mut segments = path.rsplitn(2, "/");
let end_segment = segments.next().unwrap();
let directory_segment = segments.next();
let descend_result = match self.descend(connection.position, directory_segment) {
Ok(r) => r,
Err(_) => {
maybe_send_error(object, flags, zx::sys::ZX_ERR_BAD_PATH)?;
return Ok((None, ConnectionState::Open));
}
};
match descend_result {
DescendResult::LocalChildren(children) => {
if let Some(&next_node_pos) = children.get(end_segment) {
let output = self.serve_connection_at(object, next_node_pos, flags)?;
return Ok((output, ConnectionState::Open));
} else {
maybe_send_error(object, flags, zx::sys::ZX_ERR_NOT_FOUND)?;
return Ok((None, ConnectionState::Open));
}
}
DescendResult::RemoteDir((proxy, remaining_path)) => {
let remaining_path = vec![remaining_path, end_segment.to_owned()].join("/");
proxy.open(flags, mode, &remaining_path, object)?;
return Ok((None, ConnectionState::Open));
}
}
}
DirectoryRequest::Describe { responder } => {
let mut info = self.describe_node(connection.position)?;
responder.send(&mut info)?;
}
DirectoryRequest::GetAttr { responder } => {
let mut attrs = self.node_attrs(connection.position);
responder.send(zx::sys::ZX_OK, &mut attrs)?
}
DirectoryRequest::SetAttr { responder, .. } => unsupported!(responder)?,
DirectoryRequest::Ioctl { responder, .. } => {
unsupported!(responder, &mut std::iter::empty(), &mut std::iter::empty())?
}
DirectoryRequest::Sync { responder } => unsupported!(responder)?,
DirectoryRequest::Unlink { responder, .. } => unsupported!(responder)?,
DirectoryRequest::ReadDirents { max_bytes, responder } => {
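// Serve the listing in chunks: lazily encode the full set of dirents
// once (per rewind), then return at most `max_bytes` per call while
// advancing a per-connection offset.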
let children = self.children_for_dir(connection.position)?;
let dirents_buf = connection
.dirents_buf
.get_or_insert_with(|| (self.to_dirent_bytes(&children), 0));
let (dirents_buf, offset) = (&mut dirents_buf.0, &mut dirents_buf.1);
if *offset >= dirents_buf.len() {
responder.send(zx::sys::ZX_OK, &mut std::iter::empty())?;
} else {
let new_offset = std::cmp::min(dirents_buf.len(), *offset + max_bytes as usize);
responder.send(
zx::sys::ZX_OK,
&mut dirents_buf[*offset..new_offset].iter().cloned(),
)?;
*offset = new_offset;
}
}
DirectoryRequest::Rewind { responder } => {
connection.dirents_buf = None;
responder.send(zx::sys::ZX_OK)?;
}
DirectoryRequest::GetToken { responder } => unsupported!(responder, None)?,
DirectoryRequest::Rename { responder, .. } => unsupported!(responder)?,
DirectoryRequest::Link { responder, .. } => unsupported!(responder)?,
DirectoryRequest::Watch { responder, .. } => unsupported!(responder)?,
}
Ok((None, ConnectionState::Open))
}
fn handle_file_request(
&mut self,
request: FileRequest,
connection: &mut FileConnectionData,
) -> Result<ConnectionState, Error> {
match request {
FileRequest::Clone { flags, object, control_handle: _ } => {
match self.handle_clone(flags, object, connection.position) {
Some(_) => panic!("file clone should not return output"),
None => {}
}
}
FileRequest::Close { responder } => {
responder.send(zx::sys::ZX_OK)?;
return Ok(ConnectionState::Closed);
}
FileRequest::Describe { responder } => {
let mut info = self.describe_node(connection.position)?;
responder.send(&mut info)?;
}
FileRequest::Sync { responder } => unsupported!(responder)?,
FileRequest::GetAttr { responder } => {
let mut attrs = self.node_attrs(connection.position);
responder.send(zx::sys::ZX_OK, &mut attrs)?
}
FileRequest::SetAttr { responder, .. } => unsupported!(responder)?,
FileRequest::Ioctl { responder, .. } => {
unsupported!(responder, &mut std::iter::empty(), &mut std::iter::empty())?
}
// FIXME(cramertj) enforce READ rights
FileRequest::Read { count, responder } => match &self.nodes[connection.position] {
ServiceFsNode::Directory { .. } | ServiceFsNode::Service(_) => {
panic!("read on non-file node")
}
ServiceFsNode::VmoFile { vmo, length, offset } => {
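// Clamp the read to the bytes remaining past the seek position,
// read from the VMO at (file offset + seek position), and advance
// the seek position only on success.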
let actual_count = min(count, length.saturating_sub(connection.seek_offset));
let mut data = vec![0; actual_count as usize];
let status = vmo.read(&mut data, offset.saturating_add(connection.seek_offset));
match status {
Ok(()) => {
responder.send(zx::sys::ZX_OK, &mut data.iter().cloned())?;
connection.seek_offset += actual_count;
}
Err(s) => responder.send(s.into_raw(), &mut std::iter::empty())?,
}
}
},
FileRequest::ReadAt { count, offset: read_offset, responder } => {
match &self.nodes[connection.position] {
ServiceFsNode::Directory { .. } | ServiceFsNode::Service(_) => {
panic!("read-at on non-file node")
}
ServiceFsNode::VmoFile { vmo, length, offset } => {
let length = *length;
let offset = *offset;
let actual_offset = min(offset.saturating_add(read_offset), length);
let actual_count = min(count, length.saturating_sub(actual_offset));
let mut data = vec![0; actual_count as usize];
let status = vmo.read(&mut data, actual_offset);
match status {
Ok(()) => responder.send(zx::sys::ZX_OK, &mut data.iter().cloned())?,
Err(s) => responder.send(s.into_raw(), &mut std::iter::empty())?,
}
}
}
}
FileRequest::Write { responder, .. } => unsupported!(responder, 0)?,
FileRequest::WriteAt { responder, .. } => unsupported!(responder, 0)?,
FileRequest::Seek { offset, start, responder } => {
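// Resolve the seek origin to a base position, then apply the signed
// offset with saturating arithmetic; `i64::min_value()` is special-cased
// because its absolute value does not fit in an i64.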
let start = match start {
SeekOrigin::Start => 0,
SeekOrigin::Current => connection.seek_offset,
SeekOrigin::End => match &self.nodes[connection.position] {
ServiceFsNode::Directory { .. } | ServiceFsNode::Service(_) => {
panic!("seek on non-file node")
}
ServiceFsNode::VmoFile { length, .. } => *length,
},
};
let new_offset: u64 = if offset.is_positive() {
start.saturating_add(offset as u64)
} else if offset == i64::min_value() {
0
} else {
start.saturating_sub(offset.abs() as u64)
};
connection.seek_offset = new_offset;
responder.send(zx::sys::ZX_OK, new_offset)?;
}
FileRequest::Truncate { responder, .. } => unsupported!(responder)?,
FileRequest::GetFlags { responder, .. } => unsupported!(responder, 0)?,
FileRequest::SetFlags { responder, .. } => unsupported!(responder)?,
FileRequest::GetBuffer { responder, .. } => unsupported!(responder, None)?,
}
Ok(ConnectionState::Open)
}
fn handle_node_request(
&mut self,
request: NodeRequest,
connection: &mut NodeConnectionData,
) -> Result<ConnectionState, Error> {
match request {
NodeRequest::Clone { flags, object, control_handle: _ } => {
if flags & OPEN_FLAG_NODE_REFERENCE == 0 {
// We cannot connect the object: it is requesting more than a
// node reference, which is not allowed from within a node reference.
return Ok(ConnectionState::Open);
}
match self.handle_clone(flags, object, connection.position) {
Some(_) => panic!("cloning node connection should not return output"),
None => {}
}
}
NodeRequest::Close { responder } => {
responder.send(zx::sys::ZX_OK)?;
return Ok(ConnectionState::Closed);
}
NodeRequest::Describe { responder } => {
let mut info = self.describe_node(connection.position)?;
responder.send(&mut info)?;
}
NodeRequest::Sync { responder } => unsupported!(responder)?,
NodeRequest::GetAttr { responder } => {
let mut attrs = self.node_attrs(connection.position);
responder.send(zx::sys::ZX_OK, &mut attrs)?
}
NodeRequest::SetAttr { responder, .. } => unsupported!(responder)?,
NodeRequest::Ioctl { responder, .. } => {
unsupported!(responder, &mut std::iter::empty(), &mut std::iter::empty())?
}
}
Ok(ConnectionState::Open)
}
fn describe_node(&self, pos: usize) -> Result<NodeInfo, Error> {
Ok(match self.nodes.get(pos).expect("describe on missing node") {
ServiceFsNode::Directory { .. } => NodeInfo::Directory(DirectoryObject),
ServiceFsNode::Service(..) => NodeInfo::Service(fidl_fuchsia_io::Service),
ServiceFsNode::VmoFile { vmo, offset, length } => {
let vmo = vmo
.duplicate_handle(zx::Rights::SAME_RIGHTS)
.context("error duplicating VmoFile handle in describe_node")?;
let (offset, length) = (*offset, *length);
NodeInfo::Vmofile(fidl_fuchsia_io::Vmofile { vmo, offset, length })
}
})
}
fn node_attrs(&self, pos: usize) -> NodeAttributes {
let mut attrs = NodeAttributes {
mode: libc::S_IRUSR,
id: fidl_fuchsia_io::INO_UNKNOWN,
content_size: 0,
storage_size: 0,
link_count: 1,
creation_time: 0,
modification_time: 0,
};
match self.nodes.get(pos).expect("attrs on missing node") {
ServiceFsNode::Directory { .. } => {
attrs.mode |= fidl_fuchsia_io::MODE_TYPE_DIRECTORY;
}
ServiceFsNode::VmoFile { vmo: _, offset: _, length } => {
attrs.mode |= fidl_fuchsia_io::MODE_TYPE_FILE;
attrs.content_size = *length;
attrs.storage_size = *length;
}
ServiceFsNode::Service(_) => {
attrs.mode |= fidl_fuchsia_io::MODE_TYPE_SERVICE;
}
}
attrs
}
/// Traverse the directory listing at |path| starting from node |start_pos|, returning either
/// the children of the local directory reached by the path, or a proxy for a remote
/// directory together with the portion of the path that remains to be traversed.
fn descend(&self, start_pos: usize, path: Option<&str>) -> Result<DescendResult, Error> {
let mut pos = start_pos;
if let Some(path) = path {
for (index, segment) in path.split("/").enumerate() {
let children = self.children_for_dir(pos)?;
match children.get(segment) {
Some(next_pos) => pos = *next_pos,
_ => bail!("segment not found: {}", segment),
}
match self.nodes.get(pos).expect(&format!("missing node {}", pos)) {
ServiceFsNode::Directory(Directory::Remote(proxy)) => {
return Ok(DescendResult::RemoteDir((
&proxy,
path.split("/").skip(index + 1).collect::<Vec<&str>>().join("/"),
)));
}
_ => {}
}
}
}
Ok(DescendResult::LocalChildren(self.children_for_dir(pos)?))
}
/// Retrieve the directory listing of node |pos|. Returns an error if |pos| is not a directory
/// node.
fn children_for_dir(&self, pos: usize) -> Result<&HashMap<String, usize>, Error> {
let node = self.nodes.get(pos).expect(&format!("missing node {}", pos));
match node {
ServiceFsNode::Directory(Directory::Local { children }) => Ok(children),
_ => bail!("node not a directory: {}", pos),
}
}
fn to_dirent_bytes(&self, nodes: &HashMap<String, usize>) -> Vec<u8> {
let mut buf = vec![];
for (name, node) in nodes.iter() {
let typ = self.nodes.get(*node).expect("missing child").to_dirent_type();
if let Err(e) = write_dirent_bytes(&mut buf, *node as u64, typ, name) {
eprintln!("failed encoding dirent for node {}: {}", *node, e);
}
}
buf
}
}
fn write_dirent_bytes(buf: &mut Vec<u8>, ino: u64, typ: u8, name: &str) -> Result<(), Error> {
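// Encodes one dirent in the fuchsia.io wire format: an 8-byte
// little-endian inode, a 1-byte name length, a 1-byte type, and then the
// name bytes themselves.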
// Safe to unwrap since `Write::write` on a `Vec` should never fail.
buf.write_u64::<LittleEndian>(ino).unwrap();
buf.write_u8(name.len() as u8).unwrap();
buf.write_u8(typ as u8).unwrap();
buf.write(name.as_ref()).unwrap();
Ok(())
}
impl<ServiceObjTy: ServiceObjTrait> Unpin for ServiceFs<ServiceObjTy> {}
struct PollState {
// `true` if *any* items so far have made progress.
made_progress: bool,
// Only `true` if *all* items so far are complete.
is_complete: bool,
}
impl Default for PollState {
fn default() -> Self {
Self { made_progress: false, is_complete: true }
}
}
impl PollState {
const NO_PROGRESS: PollState = PollState { made_progress: false, is_complete: false };
const SOME_PROGRESS: PollState = PollState { made_progress: true, is_complete: false };
const COMPLETE: PollState = PollState { made_progress: false, is_complete: true };
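// Folds the result of polling one set of connections into the loop's
// running summary: progress is sticky (OR), completion is not (AND).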
fn merge(&mut self, other: PollState) {
self.made_progress |= other.made_progress;
self.is_complete &= other.is_complete;
}
}
// FIXME(cramertj) it'd be nice to abstract away the common
// bits of these two functions.
impl<ServiceObjTy: ServiceObjTrait> ServiceFs<ServiceObjTy> {
fn poll_serve_dir_connection(
&mut self,
cx: &mut Context<'_>,
) -> (Option<ServiceObjTy::Output>, PollState) {
let (request, dir_stream, mut dir_connection_data) =
match self.dir_connections.poll_next_unpin(cx) {
// a client request
Poll::Ready(Some(Some(x))) => x,
// this client_connection has terminated
Poll::Ready(Some(None)) => return (None, PollState::SOME_PROGRESS),
// all client connections have terminated
Poll::Ready(None) => return (None, PollState::COMPLETE),
Poll::Pending => return (None, PollState::NO_PROGRESS),
};
let request = match request {
Ok(request) => request,
Err(e) => {
eprintln!("ServiceFs failed to parse an incoming directory request: {:?}", e);
return (None, PollState::SOME_PROGRESS);
}
};
match self.handle_dir_request(request, &mut dir_connection_data) {
Ok((value, connection_state)) => {
if let ConnectionState::Open = connection_state {
// Requeue the client to receive new requests
self.dir_connections.push(DirConnection::new(dir_stream, dir_connection_data));
}
(value, PollState::SOME_PROGRESS)
}
Err(e) => {
eprintln!("ServiceFs failed to handle an incoming directory request: {:?}", e);
(None, PollState::SOME_PROGRESS)
}
}
}
fn poll_serve_file_connection(&mut self, cx: &mut Context<'_>) -> PollState {
let (request, file_stream, mut file_connection_data) =
match self.file_connections.poll_next_unpin(cx) {
// a client request
Poll::Ready(Some(Some(x))) => x,
// This client connection has terminated
Poll::Ready(Some(None)) => return PollState::SOME_PROGRESS,
// all client connections have terminated
Poll::Ready(None) => return PollState::COMPLETE,
Poll::Pending => return PollState::NO_PROGRESS,
};
let request = match request {
Ok(request) => request,
Err(e) => {
eprintln!("ServiceFs failed to parse an incoming file request: {:?}", e);
return PollState::SOME_PROGRESS;
}
};
match self.handle_file_request(request, &mut file_connection_data) {
Ok(ConnectionState::Open) => {
// Requeue the client to receive new requests
self.file_connections.push(FileConnection::new(file_stream, file_connection_data));
}
Ok(ConnectionState::Closed) => {}
Err(e) => {
eprintln!("ServiceFs failed to handle an incoming file request: {:?}", e);
}
}
PollState::SOME_PROGRESS
}
fn poll_serve_node_connection(&mut self, cx: &mut Context<'_>) -> PollState {
let (request, node_stream, mut node_connection_data) =
match self.node_connections.poll_next_unpin(cx) {
// a client request
Poll::Ready(Some(Some(x))) => x,
// This client connection has terminated
Poll::Ready(Some(None)) => return PollState::SOME_PROGRESS,
// all client connections have terminated
Poll::Ready(None) => return PollState::COMPLETE,
Poll::Pending => return PollState::NO_PROGRESS,
};
let request = match request {
Ok(request) => request,
Err(e) => {
eprintln!("ServiceFs failed to parse an incoming node request: {:?}", e);
return PollState::SOME_PROGRESS;
}
};
match self.handle_node_request(request, &mut node_connection_data) {
Ok(ConnectionState::Open) => {
// Requeue the client to receive new requests
self.node_connections.push(NodeConnection::new(node_stream, node_connection_data));
}
Ok(ConnectionState::Closed) => {}
Err(e) => {
eprintln!("ServiceFs failed to handle an incoming node request: {:?}", e);
}
}
PollState::SOME_PROGRESS
}
}
impl<ServiceObjTy: ServiceObjTrait> Stream for ServiceFs<ServiceObjTy> {
type Item = ServiceObjTy::Output;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
loop {
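// Poll each class of connection once per iteration, merging progress
// and completion so the stream neither busy-loops nor stalls.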
let mut iter_state = PollState::default();
let (output, state) = self.poll_serve_dir_connection(cx);
if let Some(output) = output {
return Poll::Ready(Some(output));
}
iter_state.merge(state);
let state = self.poll_serve_file_connection(cx);
iter_state.merge(state);
let state = self.poll_serve_node_connection(cx);
iter_state.merge(state);
// Return `None` to end the stream if all connections are done being served.
if iter_state.is_complete {
return Poll::Ready(None);
}
// Otherwise, return `Pending` if no new requests were available to serve.
if !iter_state.made_progress {
return Poll::Pending;
}
}
}
}
| 38.904023 | 123 | 0.578591 |
01388b92615c8097eb0499c0e471381c0d386da7 | 1,549 | use crate::models::Window;
use crate::models::WindowHandle;
use serde::{Deserialize, Serialize};
/// These are responses from the window manager.
/// The display server should act on these actions.
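///
/// A display server might consume these in a match; a sketch (the `ds`
/// handler methods here are hypothetical):
///
/// ```ignore
/// match action {
/// DisplayAction::KillWindow(handle) => ds.kill_window(handle),
/// DisplayAction::MoveMouseOver(handle) => ds.warp_pointer_to(handle),
/// other => ds.handle(other),
/// }
/// ```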
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum DisplayAction {
/// Nicely ask a window if it would please close at its convenience.
KillWindow(WindowHandle),
/// Triggered after a new window is discovered and we are
/// managing it.
AddedWindow(WindowHandle),
/// Makes sure the mouse is over a given window.
MoveMouseOver(WindowHandle),
/// Makes sure the mouse is over a given point.
MoveMouseOverPoint((i32, i32)),
/// Send a window to the top move location.
MoveToTop(WindowHandle),
/// Tell the DS we no longer care about this window, so it can
/// perform any related cleanup.
DestroyedWindow(WindowHandle),
/// Tell a window that it is to become focused.
WindowTakeFocus(Window),
/// Tell the DM we are going to resize a window, so it should only
/// send events of that type.
StartResizingWindow(WindowHandle),
/// Tell the DM we are going to move a window, so it should only
/// send events of that type.
StartMovingWindow(WindowHandle),
/// Used to let the WM know of changes to the currently displayed tags.
SetCurrentTags(String),
/// Used to let the WM know of the tag for a given window.
SetWindowTags(WindowHandle, String),
/// Tell the DM to return to normal mode if it is not already in it
/// (i.e. stop resizing or moving a window).
NormalMode,
}
| 30.98 | 72 | 0.688832 |
e428fc927f05cf925caded37c7dfa77a947a0c63 | 76,251 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The region check is a final pass that runs over the AST after we have
//! inferred the type constraints but before we have actually finalized
//! the types. Its purpose is to embed a variety of region constraints.
//! Inserting these constraints as a separate pass is good because (1) it
//! localizes the code that has to do with region inference and (2) often
//! we cannot know what constraints are needed until the basic types have
//! been inferred.
//!
//! ### Interaction with the borrow checker
//!
//! In general, the job of the borrowck module (which runs later) is to
//! check that all soundness criteria are met, given a particular set of
//! regions. The job of *this* module is to anticipate the needs of the
//! borrow checker and infer regions that will satisfy its requirements.
//! It is generally true that the inference doesn't need to be sound,
//! meaning that if there is a bug and we inferred bad regions, the borrow
//! checker should catch it. This is not entirely true though; for
//! example, the borrow checker doesn't check subtyping, and it doesn't
//! check that region pointers are always live when they are used. It
//! might be worthwhile to fix this so that borrowck serves as a kind of
//! verification step -- that would add confidence in the overall
//! correctness of the compiler, at the cost of duplicating some type
//! checks and effort.
//!
//! ### Inferring the duration of borrows, automatic and otherwise
//!
//! Whenever we introduce a borrowed pointer, for example as the result of
//! a borrow expression `let x = &data`, the lifetime of the pointer `x`
//! is always specified as a region inference variable. `regionck` has the
//! job of adding constraints such that this inference variable is as
//! narrow as possible while still accommodating all uses (that is, every
//! dereference of the resulting pointer must be within the lifetime).
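//!
//! For instance (a sketch, not compiler code):
//!
//! fn use_data() {
//! let data = 22;
//! let x = &data; // the region of this borrow is a fresh variable 'v
//! println!("{}", *x); // each use of `*x` must lie within 'v
//! } // 'v is inferred as the smallest region covering all uses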
//!
//! #### Reborrows
//!
//! Generally speaking, `regionck` does NOT try to ensure that the data
//! `data` will outlive the pointer `x`. That is the job of borrowck. The
//! one exception is when "re-borrowing" the contents of another borrowed
//! pointer. For example, imagine you have a borrowed pointer `b` with
//! lifetime L1 and you have an expression `&*b`. The result of this
//! expression will be another borrowed pointer with lifetime L2 (which is
//! an inference variable). The borrow checker is going to enforce the
//! constraint that L2 < L1, because otherwise you are re-borrowing data
//! for a lifetime larger than the original loan. However, without the
//! routines in this module, the region inferencer would not know of this
//! dependency and thus it might infer the lifetime of L2 to be greater
//! than L1 (issue #3148).
//!
//! There are a number of troublesome scenarios in the tests
//! `region-dependent-*.rs`, but here is one example:
//!
//! struct Foo { i: i32 }
//! struct Bar { foo: Foo }
//! fn get_i<'a>(x: &'a Bar) -> &'a i32 {
//! let foo = &x.foo; // Lifetime L1
//! &foo.i // Lifetime L2
//! }
//!
//! Note that this comes up with `&` expressions, `ref`
//! bindings, and `autorefs`, which are the three ways to introduce
//! a borrow.
//!
//! The key point here is that when you are borrowing a value that
//! is "guaranteed" by a borrowed pointer, you must link the
//! lifetime of that borrowed pointer (L1, here) to the lifetime of
//! the borrow itself (L2). What do I mean by "guaranteed" by a
//! borrowed pointer? I mean any data that is reached by first
//! dereferencing a borrowed pointer and then either traversing
//! interior offsets or boxes. We say that the guarantor
//! of such data is the region of the borrowed pointer that was
//! traversed. This is essentially the same as the ownership
//! relation, except that a borrowed pointer never owns its
//! contents.
use astconv::AstConv;
use check::dropck;
use check::FnCtxt;
use middle::free_region::FreeRegionMap;
use middle::mem_categorization as mc;
use middle::mem_categorization::Categorization;
use middle::region::{self, CodeExtent};
use middle::subst::Substs;
use middle::traits;
use middle::ty::{self, Ty, TyCtxt, MethodCall, TypeFoldable};
use middle::infer::{self, GenericKind, InferCtxt, SubregionOrigin, TypeOrigin, VerifyBound};
use middle::pat_util;
use middle::ty::adjustment;
use middle::ty::wf::ImpliedBound;
use std::mem;
use syntax::ast;
use syntax::codemap::Span;
use rustc_front::intravisit::{self, Visitor};
use rustc_front::hir::{self, PatKind};
use rustc_front::util as hir_util;
use self::SubjectNode::Subject;
// a variation on try that just returns unit
macro_rules! ignore_err {
($e:expr) => (match $e { Ok(e) => e, Err(_) => return () })
}
///////////////////////////////////////////////////////////////////////////
// PUBLIC ENTRY POINTS
pub fn regionck_expr(fcx: &FnCtxt, e: &hir::Expr) {
let mut rcx = Rcx::new(fcx, RepeatingScope(e.id), e.id, Subject(e.id));
if fcx.err_count_since_creation() == 0 {
// regionck assumes typeck succeeded
rcx.visit_expr(e);
rcx.visit_region_obligations(e.id);
}
rcx.resolve_regions_and_report_errors();
}
/// Region checking during the WF phase for items. `wf_tys` are the
/// types from which we should derive implied bounds, if any.
pub fn regionck_item<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
item_id: ast::NodeId,
span: Span,
wf_tys: &[Ty<'tcx>]) {
debug!("regionck_item(item.id={:?}, wf_tys={:?}", item_id, wf_tys);
let mut rcx = Rcx::new(fcx, RepeatingScope(item_id), item_id, Subject(item_id));
let tcx = fcx.tcx();
rcx.free_region_map
.relate_free_regions_from_predicates(tcx, &fcx.infcx().parameter_environment.caller_bounds);
rcx.relate_free_regions(wf_tys, item_id, span);
rcx.visit_region_obligations(item_id);
rcx.resolve_regions_and_report_errors();
}
pub fn regionck_fn(fcx: &FnCtxt,
fn_id: ast::NodeId,
fn_span: Span,
decl: &hir::FnDecl,
blk: &hir::Block) {
debug!("regionck_fn(id={})", fn_id);
let mut rcx = Rcx::new(fcx, RepeatingScope(blk.id), blk.id, Subject(fn_id));
if fcx.err_count_since_creation() == 0 {
// regionck assumes typeck succeeded
rcx.visit_fn_body(fn_id, decl, blk, fn_span);
}
let tcx = fcx.tcx();
rcx.free_region_map
.relate_free_regions_from_predicates(tcx, &fcx.infcx().parameter_environment.caller_bounds);
rcx.resolve_regions_and_report_errors();
// For the top-level fn, store the free-region-map. We don't store
// any map for closures; they just share the same map as the
// function that created them.
fcx.tcx().store_free_region_map(fn_id, rcx.free_region_map);
}
///////////////////////////////////////////////////////////////////////////
// INTERNALS
pub struct Rcx<'a, 'tcx: 'a> {
pub fcx: &'a FnCtxt<'a, 'tcx>,
region_bound_pairs: Vec<(ty::Region, GenericKind<'tcx>)>,
free_region_map: FreeRegionMap,
// id of innermost fn body id
body_id: ast::NodeId,
// call_site scope of innermost fn
call_site_scope: Option<CodeExtent>,
// id of innermost fn or loop
repeating_scope: ast::NodeId,
// id of AST node being analyzed (the subject of the analysis).
subject: SubjectNode,
}
pub struct RepeatingScope(ast::NodeId);
pub enum SubjectNode { Subject(ast::NodeId), None }
impl<'a, 'tcx> Rcx<'a, 'tcx> {
pub fn new(fcx: &'a FnCtxt<'a, 'tcx>,
initial_repeating_scope: RepeatingScope,
initial_body_id: ast::NodeId,
subject: SubjectNode) -> Rcx<'a, 'tcx> {
let RepeatingScope(initial_repeating_scope) = initial_repeating_scope;
Rcx { fcx: fcx,
repeating_scope: initial_repeating_scope,
body_id: initial_body_id,
call_site_scope: None,
subject: subject,
region_bound_pairs: Vec::new(),
free_region_map: FreeRegionMap::new(),
}
}
pub fn tcx(&self) -> &'a TyCtxt<'tcx> {
self.fcx.ccx.tcx
}
pub fn infcx(&self) -> &InferCtxt<'a,'tcx> {
self.fcx.infcx()
}
fn set_call_site_scope(&mut self, call_site_scope: Option<CodeExtent>) -> Option<CodeExtent> {
mem::replace(&mut self.call_site_scope, call_site_scope)
}
fn set_body_id(&mut self, body_id: ast::NodeId) -> ast::NodeId {
mem::replace(&mut self.body_id, body_id)
}
fn set_repeating_scope(&mut self, scope: ast::NodeId) -> ast::NodeId {
mem::replace(&mut self.repeating_scope, scope)
}
/// Try to resolve the type for the given node, returning t_err if an error results. Note that
/// we never care about the details of the error; the same error will be detected and reported
/// in the writeback phase.
///
/// Note one important point: we do not attempt to resolve *region variables* here. This is
/// because regionck is essentially adding constraints to those region variables and so may yet
/// influence how they are resolved.
///
/// Consider this silly example:
///
/// ```
/// fn borrow(x: &i32) -> &i32 {x}
/// fn foo(x: @i32) -> i32 { // block: B
/// let b = borrow(x); // region: <R0>
/// *b
/// }
/// ```
///
/// Here, the region of `b` will be `<R0>`. `<R0>` is constrained to be some subregion of the
/// block B and some superregion of the call. If we forced it now, we'd choose the smaller
/// region (the call). But that would make the *b illegal. Since we don't resolve, the type
/// of b will be `&<R0>.i32` and then `*b` will require that `<R0>` be bigger than the let and
/// the `*b` expression, so we will effectively resolve `<R0>` to be the block B.
pub fn resolve_type(&self, unresolved_ty: Ty<'tcx>) -> Ty<'tcx> {
self.fcx.infcx().resolve_type_vars_if_possible(&unresolved_ty)
}
/// Try to resolve the type for the given node.
fn resolve_node_type(&self, id: ast::NodeId) -> Ty<'tcx> {
let t = self.fcx.node_ty(id);
self.resolve_type(t)
}
fn resolve_method_type(&self, method_call: MethodCall) -> Option<Ty<'tcx>> {
let method_ty = self.fcx.inh.tables.borrow().method_map
.get(&method_call).map(|method| method.ty);
method_ty.map(|method_ty| self.resolve_type(method_ty))
}
/// Try to resolve the type for the given node.
pub fn resolve_expr_type_adjusted(&mut self, expr: &hir::Expr) -> Ty<'tcx> {
let ty_unadjusted = self.resolve_node_type(expr.id);
if ty_unadjusted.references_error() {
ty_unadjusted
} else {
ty_unadjusted.adjust(
self.fcx.tcx(), expr.span, expr.id,
self.fcx.inh.tables.borrow().adjustments.get(&expr.id),
|method_call| self.resolve_method_type(method_call))
}
}
fn visit_fn_body(&mut self,
id: ast::NodeId, // the id of the fn itself
fn_decl: &hir::FnDecl,
body: &hir::Block,
span: Span)
{
// When we enter a function, we can derive implied region bounds
// from the types in its signature.
debug!("visit_fn_body(id={})", id);
let call_site = self.fcx.tcx().region_maps.lookup_code_extent(
region::CodeExtentData::CallSiteScope { fn_id: id, body_id: body.id });
let old_call_site_scope = self.set_call_site_scope(Some(call_site));
let fn_sig = {
let fn_sig_map = &self.infcx().tables.borrow().liberated_fn_sigs;
match fn_sig_map.get(&id) {
Some(f) => f.clone(),
None => {
self.tcx().sess.bug(
&format!("No fn-sig entry for id={}", id));
}
}
};
let old_region_bounds_pairs_len = self.region_bound_pairs.len();
// Collect the types from which we create inferred bounds.
// For the return type, if diverging, substitute `bool` just
// because it will have no effect.
//
// FIXME(#27579) return types should not be implied bounds
let fn_sig_tys: Vec<_> =
fn_sig.inputs.iter()
.cloned()
.chain(Some(fn_sig.output.unwrap_or(self.tcx().types.bool)))
.collect();
let old_body_id = self.set_body_id(body.id);
self.relate_free_regions(&fn_sig_tys[..], body.id, span);
link_fn_args(self,
self.tcx().region_maps.node_extent(body.id),
&fn_decl.inputs[..]);
self.visit_block(body);
self.visit_region_obligations(body.id);
let call_site_scope = self.call_site_scope.unwrap();
debug!("visit_fn_body body.id {} call_site_scope: {:?}",
body.id, call_site_scope);
type_of_node_must_outlive(self,
infer::CallReturn(span),
body.id,
ty::ReScope(call_site_scope));
self.region_bound_pairs.truncate(old_region_bounds_pairs_len);
self.set_body_id(old_body_id);
self.set_call_site_scope(old_call_site_scope);
}
fn visit_region_obligations(&mut self, node_id: ast::NodeId)
{
debug!("visit_region_obligations: node_id={}", node_id);
// region checking can introduce new pending obligations
// which, when processed, might generate new region
// obligations. So make sure we process those.
self.fcx.select_all_obligations_or_error();
// Make a copy of the region obligations vec because we'll need
// to be able to borrow the fulfillment-cx below when projecting.
let region_obligations =
self.fcx
.inh
.fulfillment_cx
.borrow()
.region_obligations(node_id)
.to_vec();
for r_o in &region_obligations {
debug!("visit_region_obligations: r_o={:?} cause={:?}",
r_o, r_o.cause);
let sup_type = self.resolve_type(r_o.sup_type);
let origin = self.code_to_origin(r_o.cause.span, sup_type, &r_o.cause.code);
type_must_outlive(self, origin, sup_type, r_o.sub_region);
}
// Processing the region obligations should not cause the list to grow further:
assert_eq!(region_obligations.len(),
self.fcx.inh.fulfillment_cx.borrow().region_obligations(node_id).len());
}
fn code_to_origin(&self,
span: Span,
sup_type: Ty<'tcx>,
code: &traits::ObligationCauseCode<'tcx>)
-> SubregionOrigin<'tcx> {
match *code {
traits::ObligationCauseCode::ReferenceOutlivesReferent(ref_type) =>
infer::ReferenceOutlivesReferent(ref_type, span),
_ =>
infer::RelateParamBound(span, sup_type),
}
}
/// This method populates the region map's `free_region_map`. It walks over the transformed
/// argument and return types for each function just before we check the body of that function,
/// looking for types where you have a borrowed pointer to other borrowed data (e.g., `&'a &'b
/// [usize]`. We do not allow references to outlive the things they point at, so we can assume
/// that `'a <= 'b`. This holds for both the argument and return types, basically because, on
/// the caller side, the caller is responsible for checking that the type of every expression
/// (including the actual values for the arguments, as well as the return type of the fn call)
/// is well-formed.
///
/// Tests: `src/test/compile-fail/regions-free-region-ordering-*.rs`
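///
/// For instance (a sketch, not compiler code):
///
/// fn first<'a, 'b>(x: &'a &'b [usize]) -> &'a usize {
/// &x[0] // relies on the implied bound `'b: 'a`: data behind the
/// // inner reference is known to live at least as long as `'a`
/// }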
fn relate_free_regions(&mut self,
fn_sig_tys: &[Ty<'tcx>],
body_id: ast::NodeId,
span: Span) {
debug!("relate_free_regions >>");
for &ty in fn_sig_tys {
let ty = self.resolve_type(ty);
debug!("relate_free_regions(t={:?})", ty);
let implied_bounds = ty::wf::implied_bounds(self.fcx.infcx(), body_id, ty, span);
// Record any relations between free regions that we observe into the free-region-map.
self.free_region_map.relate_free_regions_from_implied_bounds(&implied_bounds);
// But also record other relationships, such as `T:'x`,
// that don't go into the free-region-map but which we use
// here.
for implication in implied_bounds {
debug!("implication: {:?}", implication);
match implication {
ImpliedBound::RegionSubRegion(ty::ReFree(free_a),
ty::ReVar(vid_b)) => {
self.fcx.inh.infcx.add_given(free_a, vid_b);
}
ImpliedBound::RegionSubParam(r_a, param_b) => {
self.region_bound_pairs.push((r_a, GenericKind::Param(param_b)));
}
ImpliedBound::RegionSubProjection(r_a, projection_b) => {
self.region_bound_pairs.push((r_a, GenericKind::Projection(projection_b)));
}
ImpliedBound::RegionSubRegion(..) => {
// In principle, we could record (and take
// advantage of) every relationship here, but
// we are also free not to -- it simply means
// strictly less that we can successfully type
// check. (It may also be that we should
// revise our inference system to be more
// general and to make use of *every*
// relationship that arises here, but
// presently we do not.)
}
}
}
}
debug!("<< relate_free_regions");
}
fn resolve_regions_and_report_errors(&self) {
let subject_node_id = match self.subject {
Subject(s) => s,
SubjectNode::None => {
self.tcx().sess.bug("cannot resolve_regions_and_report_errors \
without subject node");
}
};
self.fcx.infcx().resolve_regions_and_report_errors(&self.free_region_map,
subject_node_id);
}
}
impl<'a, 'tcx, 'v> Visitor<'v> for Rcx<'a, 'tcx> {
// (..) FIXME(#3238) should use visit_pat, not visit_arm/visit_local,
// However, right now we run into an issue whereby some free
// regions are not properly related if they appear within the
// types of arguments that must be inferred. This could be
// addressed by deferring the construction of the region
// hierarchy, and in particular the relationships between free
// regions, until regionck, as described in #3238.
fn visit_fn(&mut self, _fk: intravisit::FnKind<'v>, fd: &'v hir::FnDecl,
b: &'v hir::Block, span: Span, id: ast::NodeId) {
self.visit_fn_body(id, fd, b, span)
}
fn visit_expr(&mut self, ex: &hir::Expr) { visit_expr(self, ex); }
//visit_pat: visit_pat, // (..) see above
fn visit_arm(&mut self, a: &hir::Arm) { visit_arm(self, a); }
fn visit_local(&mut self, l: &hir::Local) { visit_local(self, l); }
fn visit_block(&mut self, b: &hir::Block) { visit_block(self, b); }
}
fn visit_block(rcx: &mut Rcx, b: &hir::Block) {
intravisit::walk_block(rcx, b);
}
fn visit_arm(rcx: &mut Rcx, arm: &hir::Arm) {
// see above
for p in &arm.pats {
constrain_bindings_in_pat(&p, rcx);
}
intravisit::walk_arm(rcx, arm);
}
fn visit_local(rcx: &mut Rcx, l: &hir::Local) {
// see above
constrain_bindings_in_pat(&l.pat, rcx);
link_local(rcx, l);
intravisit::walk_local(rcx, l);
}
fn constrain_bindings_in_pat(pat: &hir::Pat, rcx: &mut Rcx) {
let tcx = rcx.fcx.tcx();
debug!("regionck::visit_pat(pat={:?})", pat);
pat_util::pat_bindings(&tcx.def_map, pat, |_, id, span, _| {
// If we have a variable that contains region'd data, that
// data will be accessible from anywhere that the variable is
// accessed. We must be wary of loops like this:
//
// // from src/test/compile-fail/borrowck-lend-flow.rs
// let mut v = box 3, w = box 4;
// let mut x = &mut w;
// loop {
// **x += 1; // (2)
// borrow(v); //~ ERROR cannot borrow
// x = &mut v; // (1)
// }
//
// Typically, we try to determine the region of a borrow from
// those points where it is dereferenced. In this case, one
// might imagine that the lifetime of `x` need only be the
// body of the loop. But of course this is incorrect because
// the pointer that is created at point (1) is consumed at
// point (2), meaning that it must be live across the loop
// iteration. The easiest way to guarantee this is to require
// that the lifetime of any regions that appear in a
// variable's type enclose at least the variable's scope.
let var_scope = tcx.region_maps.var_scope(id);
let origin = infer::BindingTypeIsNotValidAtDecl(span);
type_of_node_must_outlive(rcx, origin, id, ty::ReScope(var_scope));
let typ = rcx.resolve_node_type(id);
dropck::check_safety_of_destructor_if_necessary(rcx, typ, span, var_scope);
})
}
fn visit_expr(rcx: &mut Rcx, expr: &hir::Expr) {
debug!("regionck::visit_expr(e={:?}, repeating_scope={})",
expr, rcx.repeating_scope);
// No matter what, the type of each expression must outlive the
// scope of that expression. This also guarantees basic WF.
let expr_ty = rcx.resolve_node_type(expr.id);
// the region corresponding to this expression
let expr_region = ty::ReScope(rcx.tcx().region_maps.node_extent(expr.id));
type_must_outlive(rcx, infer::ExprTypeIsNotInScope(expr_ty, expr.span),
expr_ty, expr_region);
let method_call = MethodCall::expr(expr.id);
let opt_method_callee = rcx.fcx.inh.tables.borrow().method_map.get(&method_call).cloned();
let has_method_map = opt_method_callee.is_some();
// If we are calling a method (either explicitly or via an
// overloaded operator), check that all of the types provided as
// arguments for its type parameters are well-formed, and all the regions
// provided as arguments outlive the call.
if let Some(callee) = opt_method_callee {
let origin = match expr.node {
hir::ExprMethodCall(..) =>
infer::ParameterOrigin::MethodCall,
hir::ExprUnary(op, _) if op == hir::UnDeref =>
infer::ParameterOrigin::OverloadedDeref,
_ =>
infer::ParameterOrigin::OverloadedOperator
};
substs_wf_in_scope(rcx, origin, &callee.substs, expr.span, expr_region);
type_must_outlive(rcx, infer::ExprTypeIsNotInScope(callee.ty, expr.span),
callee.ty, expr_region);
}
// Check any autoderefs or autorefs that appear.
let adjustment = rcx.fcx.inh.tables.borrow().adjustments.get(&expr.id).map(|a| a.clone());
if let Some(adjustment) = adjustment {
debug!("adjustment={:?}", adjustment);
match adjustment {
adjustment::AdjustDerefRef(adjustment::AutoDerefRef {
autoderefs, ref autoref, ..
}) => {
let expr_ty = rcx.resolve_node_type(expr.id);
constrain_autoderefs(rcx, expr, autoderefs, expr_ty);
if let Some(ref autoref) = *autoref {
link_autoref(rcx, expr, autoderefs, autoref);
// Require that the resulting region encompasses
// the current node.
//
// FIXME(#6268) remove to support nested method calls
type_of_node_must_outlive(
rcx, infer::AutoBorrow(expr.span),
expr.id, expr_region);
}
}
/*
adjustment::AutoObject(_, ref bounds, _, _) => {
// Determine if we are casting `expr` to a trait
// instance. If so, we have to be sure that the type
// of the source obeys the new region bound.
let source_ty = rcx.resolve_node_type(expr.id);
type_must_outlive(rcx, infer::RelateObjectBound(expr.span),
source_ty, bounds.region_bound);
}
*/
_ => {}
}
// If necessary, constrain destructors in the unadjusted form of this
// expression.
let cmt_result = {
let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
mc.cat_expr_unadjusted(expr)
};
match cmt_result {
Ok(head_cmt) => {
check_safety_of_rvalue_destructor_if_necessary(rcx,
head_cmt,
expr.span);
}
Err(..) => {
let tcx = rcx.fcx.tcx();
tcx.sess.delay_span_bug(expr.span, "cat_expr_unadjusted Errd");
}
}
}
// If necessary, constrain destructors in this expression. This will be
// the adjusted form if there is an adjustment.
let cmt_result = {
let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
mc.cat_expr(expr)
};
match cmt_result {
Ok(head_cmt) => {
check_safety_of_rvalue_destructor_if_necessary(rcx, head_cmt, expr.span);
}
Err(..) => {
let tcx = rcx.fcx.tcx();
tcx.sess.delay_span_bug(expr.span, "cat_expr Errd");
}
}
debug!("regionck::visit_expr(e={:?}, repeating_scope={}) - visiting subexprs",
expr, rcx.repeating_scope);
match expr.node {
hir::ExprPath(..) => {
rcx.fcx.opt_node_ty_substs(expr.id, |item_substs| {
let origin = infer::ParameterOrigin::Path;
substs_wf_in_scope(rcx, origin, &item_substs.substs, expr.span, expr_region);
});
}
hir::ExprCall(ref callee, ref args) => {
if has_method_map {
constrain_call(rcx, expr, Some(&callee),
args.iter().map(|e| &**e), false);
} else {
constrain_callee(rcx, callee.id, expr, &callee);
constrain_call(rcx, expr, None,
args.iter().map(|e| &**e), false);
}
intravisit::walk_expr(rcx, expr);
}
hir::ExprMethodCall(_, _, ref args) => {
constrain_call(rcx, expr, Some(&args[0]),
args[1..].iter().map(|e| &**e), false);
intravisit::walk_expr(rcx, expr);
}
hir::ExprAssignOp(_, ref lhs, ref rhs) => {
if has_method_map {
constrain_call(rcx, expr, Some(&lhs),
Some(&**rhs).into_iter(), false);
}
intravisit::walk_expr(rcx, expr);
}
hir::ExprIndex(ref lhs, ref rhs) if has_method_map => {
constrain_call(rcx, expr, Some(&lhs),
Some(&**rhs).into_iter(), true);
intravisit::walk_expr(rcx, expr);
},
hir::ExprBinary(op, ref lhs, ref rhs) if has_method_map => {
let implicitly_ref_args = !hir_util::is_by_value_binop(op.node);
// As `expr_method_call`, but the call is via an
// overloaded op. Note that we (sadly) currently use an
// implicit "by ref" sort of passing style here. This
// should be converted to an adjustment!
constrain_call(rcx, expr, Some(&lhs),
Some(&**rhs).into_iter(), implicitly_ref_args);
intravisit::walk_expr(rcx, expr);
}
hir::ExprBinary(_, ref lhs, ref rhs) => {
// If you do `x OP y`, then the types of `x` and `y` must
// outlive the operation you are performing.
let lhs_ty = rcx.resolve_expr_type_adjusted(&lhs);
let rhs_ty = rcx.resolve_expr_type_adjusted(&rhs);
for &ty in &[lhs_ty, rhs_ty] {
type_must_outlive(rcx,
infer::Operand(expr.span),
ty,
expr_region);
}
intravisit::walk_expr(rcx, expr);
}
hir::ExprUnary(op, ref lhs) if has_method_map => {
let implicitly_ref_args = !hir_util::is_by_value_unop(op);
// As above.
constrain_call(rcx, expr, Some(&lhs),
None::<hir::Expr>.iter(), implicitly_ref_args);
intravisit::walk_expr(rcx, expr);
}
hir::ExprUnary(hir::UnDeref, ref base) => {
// For *a, the lifetime of a must enclose the deref
let method_call = MethodCall::expr(expr.id);
let base_ty = match rcx.fcx.inh.tables.borrow().method_map.get(&method_call) {
Some(method) => {
constrain_call(rcx, expr, Some(&base),
None::<hir::Expr>.iter(), true);
let fn_ret = // late-bound regions in overloaded method calls are instantiated
rcx.tcx().no_late_bound_regions(&method.ty.fn_ret()).unwrap();
fn_ret.unwrap()
}
None => rcx.resolve_node_type(base.id)
};
if let ty::TyRef(r_ptr, _) = base_ty.sty {
mk_subregion_due_to_dereference(
rcx, expr.span, expr_region, *r_ptr);
}
intravisit::walk_expr(rcx, expr);
}
hir::ExprIndex(ref vec_expr, _) => {
// For a[b], the lifetime of a must enclose the deref
let vec_type = rcx.resolve_expr_type_adjusted(&vec_expr);
constrain_index(rcx, expr, vec_type);
intravisit::walk_expr(rcx, expr);
}
hir::ExprCast(ref source, _) => {
// Determine if we are casting `source` to a trait
// instance. If so, we have to be sure that the type of
// the source obeys the trait's region bound.
constrain_cast(rcx, expr, &source);
intravisit::walk_expr(rcx, expr);
}
hir::ExprAddrOf(m, ref base) => {
link_addr_of(rcx, expr, m, &base);
// Require that when you write a `&expr` expression, the
// resulting pointer has a lifetime that encompasses the
// `&expr` expression itself. Note that we constraining
// the type of the node expr.id here *before applying
// adjustments*.
//
// FIXME(#6268) nested method calls requires that this rule change
let ty0 = rcx.resolve_node_type(expr.id);
type_must_outlive(rcx, infer::AddrOf(expr.span), ty0, expr_region);
intravisit::walk_expr(rcx, expr);
}
hir::ExprMatch(ref discr, ref arms, _) => {
link_match(rcx, &discr, &arms[..]);
intravisit::walk_expr(rcx, expr);
}
hir::ExprClosure(_, _, ref body) => {
check_expr_fn_block(rcx, expr, &body);
}
hir::ExprLoop(ref body, _) => {
let repeating_scope = rcx.set_repeating_scope(body.id);
intravisit::walk_expr(rcx, expr);
rcx.set_repeating_scope(repeating_scope);
}
hir::ExprWhile(ref cond, ref body, _) => {
let repeating_scope = rcx.set_repeating_scope(cond.id);
rcx.visit_expr(&cond);
rcx.set_repeating_scope(body.id);
rcx.visit_block(&body);
rcx.set_repeating_scope(repeating_scope);
}
hir::ExprRet(Some(ref ret_expr)) => {
let call_site_scope = rcx.call_site_scope;
debug!("visit_expr ExprRet ret_expr.id {} call_site_scope: {:?}",
ret_expr.id, call_site_scope);
type_of_node_must_outlive(rcx,
infer::CallReturn(ret_expr.span),
ret_expr.id,
ty::ReScope(call_site_scope.unwrap()));
intravisit::walk_expr(rcx, expr);
}
_ => {
intravisit::walk_expr(rcx, expr);
}
}
}
fn constrain_cast(rcx: &mut Rcx,
cast_expr: &hir::Expr,
source_expr: &hir::Expr)
{
debug!("constrain_cast(cast_expr={:?}, source_expr={:?})",
cast_expr,
source_expr);
let source_ty = rcx.resolve_node_type(source_expr.id);
let target_ty = rcx.resolve_node_type(cast_expr.id);
walk_cast(rcx, cast_expr, source_ty, target_ty);
fn walk_cast<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
cast_expr: &hir::Expr,
from_ty: Ty<'tcx>,
to_ty: Ty<'tcx>) {
debug!("walk_cast(from_ty={:?}, to_ty={:?})",
from_ty,
to_ty);
match (&from_ty.sty, &to_ty.sty) {
/*From:*/ (&ty::TyRef(from_r, ref from_mt),
/*To: */ &ty::TyRef(to_r, ref to_mt)) => {
// Target cannot outlive source, naturally.
rcx.fcx.mk_subr(infer::Reborrow(cast_expr.span), *to_r, *from_r);
walk_cast(rcx, cast_expr, from_mt.ty, to_mt.ty);
}
/*From:*/ (_,
/*To: */ &ty::TyTrait(box ty::TraitTy { ref bounds, .. })) => {
// When T is existentially quantified as a trait
// `Foo+'to`, it must outlive the region bound `'to`.
type_must_outlive(rcx, infer::RelateObjectBound(cast_expr.span),
from_ty, bounds.region_bound);
}
/*From:*/ (&ty::TyBox(from_referent_ty),
/*To: */ &ty::TyBox(to_referent_ty)) => {
walk_cast(rcx, cast_expr, from_referent_ty, to_referent_ty);
}
_ => { }
}
}
}
fn check_expr_fn_block(rcx: &mut Rcx,
expr: &hir::Expr,
body: &hir::Block) {
let repeating_scope = rcx.set_repeating_scope(body.id);
intravisit::walk_expr(rcx, expr);
rcx.set_repeating_scope(repeating_scope);
}
fn constrain_callee(rcx: &mut Rcx,
callee_id: ast::NodeId,
_call_expr: &hir::Expr,
_callee_expr: &hir::Expr) {
let callee_ty = rcx.resolve_node_type(callee_id);
match callee_ty.sty {
ty::TyFnDef(..) | ty::TyFnPtr(_) => { }
_ => {
// this should not happen, but it does if the program is
// erroneous
//
// tcx.sess.span_bug(
// callee_expr.span,
// format!("Calling non-function: {}", callee_ty));
}
}
}
fn constrain_call<'a, I: Iterator<Item=&'a hir::Expr>>(rcx: &mut Rcx,
call_expr: &hir::Expr,
receiver: Option<&hir::Expr>,
arg_exprs: I,
implicitly_ref_args: bool) {
//! Invoked on every call site (i.e., normal calls, method calls,
//! and overloaded operators). Constrains the regions which appear
//! in the type of the function. Also constrains the regions that
//! appear in the arguments appropriately.
debug!("constrain_call(call_expr={:?}, \
receiver={:?}, \
implicitly_ref_args={})",
call_expr,
receiver,
implicitly_ref_args);
// `callee_region` is the scope representing the time in which the
// call occurs.
//
// FIXME(#6268) to support nested method calls, should be callee_id
let callee_scope = rcx.tcx().region_maps.node_extent(call_expr.id);
let callee_region = ty::ReScope(callee_scope);
debug!("callee_region={:?}", callee_region);
for arg_expr in arg_exprs {
debug!("Argument: {:?}", arg_expr);
// ensure that any regions appearing in the argument type are
// valid for at least the lifetime of the function:
type_of_node_must_outlive(
rcx, infer::CallArg(arg_expr.span),
arg_expr.id, callee_region);
// Unfortunately, there are two means of taking implicit
// references, and we need to propagate constraints as a
// result. Modes are going away and the "DerefArgs" code
// should be ported to use adjustments.
if implicitly_ref_args {
link_by_ref(rcx, arg_expr, callee_scope);
}
}
// as loop above, but for receiver
if let Some(r) = receiver {
debug!("receiver: {:?}", r);
type_of_node_must_outlive(
rcx, infer::CallRcvr(r.span),
r.id, callee_region);
if implicitly_ref_args {
link_by_ref(rcx, &r, callee_scope);
}
}
}
/// Invoked on any auto-dereference that occurs. Checks that if this is a region pointer being
/// dereferenced, the lifetime of the pointer includes the deref expr.
fn constrain_autoderefs<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
deref_expr: &hir::Expr,
derefs: usize,
mut derefd_ty: Ty<'tcx>)
{
debug!("constrain_autoderefs(deref_expr={:?}, derefs={}, derefd_ty={:?})",
deref_expr,
derefs,
derefd_ty);
let s_deref_expr = rcx.tcx().region_maps.node_extent(deref_expr.id);
let r_deref_expr = ty::ReScope(s_deref_expr);
for i in 0..derefs {
let method_call = MethodCall::autoderef(deref_expr.id, i as u32);
debug!("constrain_autoderefs: method_call={:?} (of {:?} total)", method_call, derefs);
let method = rcx.fcx.inh.tables.borrow().method_map.get(&method_call).map(|m| m.clone());
derefd_ty = match method {
Some(method) => {
debug!("constrain_autoderefs: #{} is overloaded, method={:?}",
i, method);
let origin = infer::ParameterOrigin::OverloadedDeref;
substs_wf_in_scope(rcx, origin, method.substs, deref_expr.span, r_deref_expr);
// Treat overloaded autoderefs as if an AutoRef adjustment
// was applied on the base type, as that is always the case.
let fn_sig = method.ty.fn_sig();
let fn_sig = // late-bound regions should have been instantiated
rcx.tcx().no_late_bound_regions(fn_sig).unwrap();
let self_ty = fn_sig.inputs[0];
let (m, r) = match self_ty.sty {
ty::TyRef(r, ref m) => (m.mutbl, r),
_ => {
rcx.tcx().sess.span_bug(
deref_expr.span,
&format!("bad overloaded deref type {:?}",
method.ty))
}
};
debug!("constrain_autoderefs: receiver r={:?} m={:?}",
r, m);
{
let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
let self_cmt = ignore_err!(mc.cat_expr_autoderefd(deref_expr, i));
debug!("constrain_autoderefs: self_cmt={:?}",
self_cmt);
link_region(rcx, deref_expr.span, r,
ty::BorrowKind::from_mutbl(m), self_cmt);
}
// Specialized version of constrain_call.
type_must_outlive(rcx, infer::CallRcvr(deref_expr.span),
self_ty, r_deref_expr);
match fn_sig.output {
ty::FnConverging(return_type) => {
type_must_outlive(rcx, infer::CallReturn(deref_expr.span),
return_type, r_deref_expr);
return_type
}
ty::FnDiverging => unreachable!()
}
}
None => derefd_ty
};
if let ty::TyRef(r_ptr, _) = derefd_ty.sty {
mk_subregion_due_to_dereference(rcx, deref_expr.span,
r_deref_expr, *r_ptr);
}
match derefd_ty.builtin_deref(true, ty::NoPreference) {
Some(mt) => derefd_ty = mt.ty,
/* if this type can't be dereferenced, then there's already an error
in the session saying so. Just bail out for now */
None => break
}
}
}
pub fn mk_subregion_due_to_dereference(rcx: &mut Rcx,
deref_span: Span,
minimum_lifetime: ty::Region,
maximum_lifetime: ty::Region) {
rcx.fcx.mk_subr(infer::DerefPointer(deref_span),
minimum_lifetime, maximum_lifetime)
}
fn check_safety_of_rvalue_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
cmt: mc::cmt<'tcx>,
span: Span) {
match cmt.cat {
Categorization::Rvalue(region) => {
match region {
ty::ReScope(rvalue_scope) => {
let typ = rcx.resolve_type(cmt.ty);
dropck::check_safety_of_destructor_if_necessary(rcx,
typ,
span,
rvalue_scope);
}
ty::ReStatic => {}
region => {
rcx.tcx()
.sess
.span_bug(span,
&format!("unexpected rvalue region in rvalue \
destructor safety checking: `{:?}`",
region));
}
}
}
_ => {}
}
}
/// Invoked on any index expression that occurs. Checks that if this is a slice being indexed, the
/// lifetime of the pointer includes the index expression.
fn constrain_index<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
index_expr: &hir::Expr,
indexed_ty: Ty<'tcx>)
{
debug!("constrain_index(index_expr=?, indexed_ty={}",
rcx.fcx.infcx().ty_to_string(indexed_ty));
let r_index_expr = ty::ReScope(rcx.tcx().region_maps.node_extent(index_expr.id));
if let ty::TyRef(r_ptr, mt) = indexed_ty.sty {
match mt.ty.sty {
ty::TySlice(_) | ty::TyStr => {
rcx.fcx.mk_subr(infer::IndexSlice(index_expr.span),
r_index_expr, *r_ptr);
}
_ => {}
}
}
}
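// Hedged illustration (hypothetical user-level code): indexing through a
// slice reference reborrows it, so the `&[u32]` must outlive the index
// expression — the `r_index_expr <= r_ptr` constraint added above.
#[allow(dead_code)]
fn demo_index(s: &[u32]) -> u32 {
    s[0]
}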
/// Guarantees that any lifetimes which appear in the type of the node `id` (after applying
/// adjustments) are valid for at least `minimum_lifetime`
fn type_of_node_must_outlive<'a, 'tcx>(
rcx: &mut Rcx<'a, 'tcx>,
origin: infer::SubregionOrigin<'tcx>,
id: ast::NodeId,
minimum_lifetime: ty::Region)
{
let tcx = rcx.fcx.tcx();
// Try to resolve the type. If we encounter an error, then typeck
// is going to fail anyway, so just stop here and let typeck
// report errors later on in the writeback phase.
let ty0 = rcx.resolve_node_type(id);
let ty = ty0.adjust(tcx, origin.span(), id,
rcx.fcx.inh.tables.borrow().adjustments.get(&id),
|method_call| rcx.resolve_method_type(method_call));
debug!("constrain_regions_in_type_of_node(\
ty={}, ty0={}, id={}, minimum_lifetime={:?})",
ty, ty0,
id, minimum_lifetime);
type_must_outlive(rcx, origin, ty, minimum_lifetime);
}
/// Computes the guarantor for an expression `&base` and then ensures that the lifetime of the
/// resulting pointer is linked to the lifetime of its guarantor (if any).
fn link_addr_of(rcx: &mut Rcx, expr: &hir::Expr,
mutability: hir::Mutability, base: &hir::Expr) {
debug!("link_addr_of(expr={:?}, base={:?})", expr, base);
let cmt = {
let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
ignore_err!(mc.cat_expr(base))
};
debug!("link_addr_of: cmt={:?}", cmt);
link_region_from_node_type(rcx, expr.span, expr.id, mutability, cmt);
}
/// Computes the guarantors for any ref bindings in a `let` and
/// then ensures that the lifetime of the resulting pointer is
/// linked to the lifetime of the initialization expression.
fn link_local(rcx: &Rcx, local: &hir::Local) {
debug!("regionck::for_local()");
let init_expr = match local.init {
None => { return; }
Some(ref expr) => &**expr,
};
let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
let discr_cmt = ignore_err!(mc.cat_expr(init_expr));
link_pattern(rcx, mc, discr_cmt, &local.pat);
}
/// Computes the guarantors for any ref bindings in a match and
/// then ensures that the lifetime of the resulting pointer is
/// linked to the lifetime of its guarantor (if any).
fn link_match(rcx: &Rcx, discr: &hir::Expr, arms: &[hir::Arm]) {
debug!("regionck::for_match()");
let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
let discr_cmt = ignore_err!(mc.cat_expr(discr));
debug!("discr_cmt={:?}", discr_cmt);
for arm in arms {
for root_pat in &arm.pats {
link_pattern(rcx, mc, discr_cmt.clone(), &root_pat);
}
}
}
/// Computes the guarantors for any ref bindings in the function arguments and
/// then ensures that the lifetime of the resulting pointer is
/// linked to the lifetime of its guarantor (if any).
fn link_fn_args(rcx: &Rcx, body_scope: CodeExtent, args: &[hir::Arg]) {
debug!("regionck::link_fn_args(body_scope={:?})", body_scope);
let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
for arg in args {
let arg_ty = rcx.fcx.node_ty(arg.id);
let re_scope = ty::ReScope(body_scope);
let arg_cmt = mc.cat_rvalue(arg.id, arg.ty.span, re_scope, arg_ty);
debug!("arg_ty={:?} arg_cmt={:?} arg={:?}",
arg_ty,
arg_cmt,
arg);
link_pattern(rcx, mc, arg_cmt, &arg.pat);
}
}
/// Link lifetimes of any ref bindings in `root_pat` to the pointers found in the discriminant, if
/// needed.
fn link_pattern<'t, 'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
mc: mc::MemCategorizationContext<'t, 'a, 'tcx>,
discr_cmt: mc::cmt<'tcx>,
root_pat: &hir::Pat) {
debug!("link_pattern(discr_cmt={:?}, root_pat={:?})",
discr_cmt,
root_pat);
let _ = mc.cat_pattern(discr_cmt, root_pat, |mc, sub_cmt, sub_pat| {
match sub_pat.node {
// `ref x` pattern
PatKind::Ident(hir::BindByRef(mutbl), _, _) => {
link_region_from_node_type(
rcx, sub_pat.span, sub_pat.id,
mutbl, sub_cmt);
}
// `[_, ..slice, _]` pattern
PatKind::Vec(_, Some(ref slice_pat), _) => {
match mc.cat_slice_pattern(sub_cmt, &slice_pat) {
Ok((slice_cmt, slice_mutbl, slice_r)) => {
link_region(rcx, sub_pat.span, &slice_r,
ty::BorrowKind::from_mutbl(slice_mutbl),
slice_cmt);
}
Err(()) => {}
}
}
_ => {}
}
});
}
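// Hedged illustration (hypothetical user-level code) of the `ref x` case
// handled above: the borrow introduced by the binding is linked to the
// region of the matched place.
#[allow(dead_code)]
fn demo_ref_binding() {
    let pair = (1u32, 2u32);
    // `ref a` borrows from `pair`; the borrow's lifetime is tied to `pair`.
    let (ref a, _) = pair;
    let _: &u32 = a;
}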
/// Link lifetime of borrowed pointer resulting from autoref to lifetimes in the value being
/// autoref'd.
fn link_autoref(rcx: &Rcx,
expr: &hir::Expr,
autoderefs: usize,
autoref: &adjustment::AutoRef)
{
debug!("link_autoref(autoref={:?})", autoref);
let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
let expr_cmt = ignore_err!(mc.cat_expr_autoderefd(expr, autoderefs));
debug!("expr_cmt={:?}", expr_cmt);
match *autoref {
adjustment::AutoPtr(r, m) => {
link_region(rcx, expr.span, r,
ty::BorrowKind::from_mutbl(m), expr_cmt);
}
adjustment::AutoUnsafe(m) => {
let r = ty::ReScope(rcx.tcx().region_maps.node_extent(expr.id));
link_region(rcx, expr.span, &r, ty::BorrowKind::from_mutbl(m), expr_cmt);
}
}
}
/// Computes the guarantor for cases where the `expr` is being passed by implicit reference and
/// must outlive `callee_scope`.
fn link_by_ref(rcx: &Rcx,
expr: &hir::Expr,
callee_scope: CodeExtent) {
debug!("link_by_ref(expr={:?}, callee_scope={:?})",
expr, callee_scope);
let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
let expr_cmt = ignore_err!(mc.cat_expr(expr));
let borrow_region = ty::ReScope(callee_scope);
link_region(rcx, expr.span, &borrow_region, ty::ImmBorrow, expr_cmt);
}
/// Like `link_region()`, except that the region is extracted from the type of `id`, which must be
/// some reference (`&T`, `&str`, etc).
fn link_region_from_node_type<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
span: Span,
id: ast::NodeId,
mutbl: hir::Mutability,
cmt_borrowed: mc::cmt<'tcx>) {
debug!("link_region_from_node_type(id={:?}, mutbl={:?}, cmt_borrowed={:?})",
id, mutbl, cmt_borrowed);
let rptr_ty = rcx.resolve_node_type(id);
if let ty::TyRef(&r, _) = rptr_ty.sty {
debug!("rptr_ty={}", rptr_ty);
link_region(rcx, span, &r, ty::BorrowKind::from_mutbl(mutbl),
cmt_borrowed);
}
}
/// Informs the inference engine that `borrow_cmt` is being borrowed with kind `borrow_kind` and
/// lifetime `borrow_region`. In order to ensure borrowck is satisfied, this may create constraints
/// between regions, as explained in `link_reborrowed_region()`.
fn link_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
span: Span,
borrow_region: &ty::Region,
borrow_kind: ty::BorrowKind,
borrow_cmt: mc::cmt<'tcx>) {
let mut borrow_cmt = borrow_cmt;
let mut borrow_kind = borrow_kind;
let origin = infer::DataBorrowed(borrow_cmt.ty, span);
type_must_outlive(rcx, origin, borrow_cmt.ty, *borrow_region);
loop {
debug!("link_region(borrow_region={:?}, borrow_kind={:?}, borrow_cmt={:?})",
borrow_region,
borrow_kind,
borrow_cmt);
match borrow_cmt.cat.clone() {
Categorization::Deref(ref_cmt, _,
mc::Implicit(ref_kind, ref_region)) |
Categorization::Deref(ref_cmt, _,
mc::BorrowedPtr(ref_kind, ref_region)) => {
match link_reborrowed_region(rcx, span,
borrow_region, borrow_kind,
ref_cmt, ref_region, ref_kind,
borrow_cmt.note) {
Some((c, k)) => {
borrow_cmt = c;
borrow_kind = k;
}
None => {
return;
}
}
}
Categorization::Downcast(cmt_base, _) |
Categorization::Deref(cmt_base, _, mc::Unique) |
Categorization::Interior(cmt_base, _) => {
// Borrowing interior or owned data requires the base
// to be valid and borrowable in the same fashion.
                borrow_cmt = cmt_base;
                // `borrow_kind` carries over unchanged.
}
Categorization::Deref(_, _, mc::UnsafePtr(..)) |
Categorization::StaticItem |
Categorization::Upvar(..) |
Categorization::Local(..) |
Categorization::Rvalue(..) => {
// These are all "base cases" with independent lifetimes
// that are not subject to inference
return;
}
}
}
}
/// This is the most complicated case: the path being borrowed is
/// itself the referent of a borrowed pointer. Let me give an
/// example fragment of code to make clear(er) the situation:
///
/// let r: &'a mut T = ...; // the original reference "r" has lifetime 'a
/// ...
/// &'z *r // the reborrow has lifetime 'z
///
/// Now, in this case, our primary job is to add the inference
/// constraint that `'z <= 'a`. Given this setup, let's clarify the
/// parameters in (rough) terms of the example:
///
/// A borrow of: `& 'z bk * r` where `r` has type `& 'a bk T`
/// borrow_region ^~ ref_region ^~
/// borrow_kind ^~ ref_kind ^~
/// ref_cmt ^
///
/// Here `bk` stands for some borrow-kind (e.g., `mut`, `uniq`, etc).
///
/// Unfortunately, there are some complications beyond the simple
/// scenario I just painted:
///
/// 1. The reference `r` might in fact be a "by-ref" upvar. In that
/// case, we have two jobs. First, we are inferring whether this reference
/// should be an `&T`, `&mut T`, or `&uniq T` reference, and we must
/// adjust that based on this borrow (e.g., if this is an `&mut` borrow,
/// then `r` must be an `&mut` reference). Second, whenever we link
/// two regions (here, `'z <= 'a`), we supply a *cause*, and in this
/// case we adjust the cause to indicate that the reference being
/// "reborrowed" is itself an upvar. This provides a nicer error message
/// should something go wrong.
///
/// 2. There may in fact be more levels of reborrowing. In the
/// example, I said the borrow was like `&'z *r`, but it might
/// in fact be a borrow like `&'z **q` where `q` has type `&'a
/// &'b mut T`. In that case, we want to ensure that `'z <= 'a`
/// and `'z <= 'b`. This is explained more below.
///
/// The return value of this function indicates whether we need to
/// recurse and process `ref_cmt` (see case 2 above).
fn link_reborrowed_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
span: Span,
borrow_region: &ty::Region,
borrow_kind: ty::BorrowKind,
ref_cmt: mc::cmt<'tcx>,
ref_region: ty::Region,
mut ref_kind: ty::BorrowKind,
note: mc::Note)
-> Option<(mc::cmt<'tcx>, ty::BorrowKind)>
{
// Possible upvar ID we may need later to create an entry in the
// maybe link map.
// Detect by-ref upvar `x`:
let cause = match note {
mc::NoteUpvarRef(ref upvar_id) => {
let upvar_capture_map = &rcx.fcx.inh.tables.borrow_mut().upvar_capture_map;
match upvar_capture_map.get(upvar_id) {
Some(&ty::UpvarCapture::ByRef(ref upvar_borrow)) => {
// The mutability of the upvar may have been modified
// by the above adjustment, so update our local variable.
ref_kind = upvar_borrow.kind;
infer::ReborrowUpvar(span, *upvar_id)
}
_ => {
rcx.tcx().sess.span_bug(
span,
&format!("Illegal upvar id: {:?}",
upvar_id));
}
}
}
mc::NoteClosureEnv(ref upvar_id) => {
// We don't have any mutability changes to propagate, but
// we do want to note that an upvar reborrow caused this
// link
infer::ReborrowUpvar(span, *upvar_id)
}
_ => {
infer::Reborrow(span)
}
};
debug!("link_reborrowed_region: {:?} <= {:?}",
borrow_region,
ref_region);
rcx.fcx.mk_subr(cause, *borrow_region, ref_region);
// If we end up needing to recurse and establish a region link
// with `ref_cmt`, calculate what borrow kind we will end up
// needing. This will be used below.
//
// One interesting twist is that we can weaken the borrow kind
// when we recurse: to reborrow an `&mut` referent as mutable,
// borrowck requires a unique path to the `&mut` reference but not
// necessarily a *mutable* path.
let new_borrow_kind = match borrow_kind {
ty::ImmBorrow =>
ty::ImmBorrow,
ty::MutBorrow | ty::UniqueImmBorrow =>
ty::UniqueImmBorrow
};
// Decide whether we need to recurse and link any regions within
    // the `ref_cmt`. This is concerned with the case where the value
// being reborrowed is in fact a borrowed pointer found within
// another borrowed pointer. For example:
//
// let p: &'b &'a mut T = ...;
// ...
// &'z **p
//
// What makes this case particularly tricky is that, if the data
// being borrowed is a `&mut` or `&uniq` borrow, borrowck requires
// not only that `'z <= 'a`, (as before) but also `'z <= 'b`
// (otherwise the user might mutate through the `&mut T` reference
// after `'b` expires and invalidate the borrow we are looking at
// now).
//
// So let's re-examine our parameters in light of this more
// complicated (possible) scenario:
//
// A borrow of: `& 'z bk * * p` where `p` has type `&'b bk & 'a bk T`
// borrow_region ^~ ref_region ^~
// borrow_kind ^~ ref_kind ^~
// ref_cmt ^~~
//
// (Note that since we have not examined `ref_cmt.cat`, we don't
// know whether this scenario has occurred; but I wanted to show
// how all the types get adjusted.)
match ref_kind {
ty::ImmBorrow => {
            // The reference being reborrowed is a shareable ref of
// type `&'a T`. In this case, it doesn't matter where we
// *found* the `&T` pointer, the memory it references will
// be valid and immutable for `'a`. So we can stop here.
//
// (Note that the `borrow_kind` must also be ImmBorrow or
            // else the user is borrowing imm memory as mut memory,
// which means they'll get an error downstream in borrowck
// anyhow.)
return None;
}
ty::MutBorrow | ty::UniqueImmBorrow => {
// The reference being reborrowed is either an `&mut T` or
// `&uniq T`. This is the case where recursion is needed.
return Some((ref_cmt, new_borrow_kind));
}
}
}
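// Hedged illustration (hypothetical user-level code; `demo_reborrow` is a
// name invented here): reborrowing `**r` with lifetime `'z` is only legal
// because `'z <= 'a` — exactly the subregion constraint added above.
#[allow(dead_code)]
fn demo_reborrow<'a, 'z>(r: &'z mut &'a mut u32) -> &'z mut u32 {
    // Both `'z <= 'a` (inner reference) and `'z <=` the outer borrow hold
    // here by construction, so the reborrow type-checks.
    &mut **r
}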
/// Checks that the values provided for type/region arguments in a given
/// expression are well-formed and in-scope.
pub fn substs_wf_in_scope<'a,'tcx>(rcx: &mut Rcx<'a,'tcx>,
origin: infer::ParameterOrigin,
substs: &Substs<'tcx>,
expr_span: Span,
expr_region: ty::Region) {
debug!("substs_wf_in_scope(substs={:?}, \
expr_region={:?}, \
origin={:?}, \
expr_span={:?})",
substs, expr_region, origin, expr_span);
let origin = infer::ParameterInScope(origin, expr_span);
    for &region in substs.regions() {
rcx.fcx.mk_subr(origin.clone(), expr_region, region);
}
for &ty in &substs.types {
let ty = rcx.resolve_type(ty);
type_must_outlive(rcx, origin.clone(), ty, expr_region);
}
}
/// Ensures that type is well-formed in `region`, which implies (among
/// other things) that all borrowed data reachable via `ty` outlives
/// `region`.
pub fn type_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
region: ty::Region)
{
let ty = rcx.resolve_type(ty);
debug!("type_must_outlive(ty={:?}, region={:?}, origin={:?})",
ty,
region,
origin);
assert!(!ty.has_escaping_regions());
let components = ty::outlives::components(rcx.infcx(), ty);
components_must_outlive(rcx, origin, components, region);
}
fn components_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
origin: infer::SubregionOrigin<'tcx>,
components: Vec<ty::outlives::Component<'tcx>>,
region: ty::Region)
{
for component in components {
let origin = origin.clone();
match component {
ty::outlives::Component::Region(region1) => {
rcx.fcx.mk_subr(origin, region, region1);
}
ty::outlives::Component::Param(param_ty) => {
param_ty_must_outlive(rcx, origin, region, param_ty);
}
ty::outlives::Component::Projection(projection_ty) => {
projection_must_outlive(rcx, origin, region, projection_ty);
}
ty::outlives::Component::EscapingProjection(subcomponents) => {
components_must_outlive(rcx, origin, subcomponents, region);
}
ty::outlives::Component::UnresolvedInferenceVariable(v) => {
// ignore this, we presume it will yield an error
// later, since if a type variable is not resolved by
// this point it never will be
rcx.tcx().sess.delay_span_bug(
origin.span(),
&format!("unresolved inference variable in outlives: {:?}", v));
}
}
}
}
fn param_ty_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
origin: infer::SubregionOrigin<'tcx>,
region: ty::Region,
param_ty: ty::ParamTy) {
debug!("param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})",
region, param_ty, origin);
let verify_bound = param_bound(rcx, param_ty);
let generic = GenericKind::Param(param_ty);
rcx.fcx.infcx().verify_generic_bound(origin, generic, region, verify_bound);
}
fn projection_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
origin: infer::SubregionOrigin<'tcx>,
region: ty::Region,
projection_ty: ty::ProjectionTy<'tcx>)
{
debug!("projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})",
region, projection_ty, origin);
// This case is thorny for inference. The fundamental problem is
// that there are many cases where we have choice, and inference
// doesn't like choice (the current region inference in
// particular). :) First off, we have to choose between using the
// OutlivesProjectionEnv, OutlivesProjectionTraitDef, and
// OutlivesProjectionComponent rules, any one of which is
// sufficient. If there are no inference variables involved, it's
// not hard to pick the right rule, but if there are, we're in a
// bit of a catch 22: if we picked which rule we were going to
// use, we could add constraints to the region inference graph
// that make it apply, but if we don't add those constraints, the
// rule might not apply (but another rule might). For now, we err
// on the side of adding too few edges into the graph.
// Compute the bounds we can derive from the environment or trait
// definition. We know that the projection outlives all the
// regions in this list.
let env_bounds = projection_declared_bounds(rcx, origin.span(), projection_ty);
debug!("projection_must_outlive: env_bounds={:?}",
env_bounds);
// If we know that the projection outlives 'static, then we're
// done here.
if env_bounds.contains(&ty::ReStatic) {
debug!("projection_must_outlive: 'static as declared bound");
return;
}
// If declared bounds list is empty, the only applicable rule is
// OutlivesProjectionComponent. If there are inference variables,
// then, we can break down the outlives into more primitive
// components without adding unnecessary edges.
//
// If there are *no* inference variables, however, we COULD do
// this, but we choose not to, because the error messages are less
// good. For example, a requirement like `T::Item: 'r` would be
// translated to a requirement that `T: 'r`; when this is reported
// to the user, it will thus say "T: 'r must hold so that T::Item:
// 'r holds". But that makes it sound like the only way to fix
// the problem is to add `T: 'r`, which isn't true. So, if there are no
// inference variables, we use a verify constraint instead of adding
// edges, which winds up enforcing the same condition.
let needs_infer = {
projection_ty.trait_ref.substs.types.iter().any(|t| t.needs_infer()) ||
projection_ty.trait_ref.substs.regions().iter().any(|r| r.needs_infer())
};
if env_bounds.is_empty() && needs_infer {
debug!("projection_must_outlive: no declared bounds");
for &component_ty in &projection_ty.trait_ref.substs.types {
type_must_outlive(rcx, origin.clone(), component_ty, region);
}
for &r in projection_ty.trait_ref.substs.regions() {
rcx.fcx.mk_subr(origin.clone(), region, r);
}
return;
}
// If we find that there is a unique declared bound `'b`, and this bound
// appears in the trait reference, then the best action is to require that `'b:'r`,
// so do that. This is best no matter what rule we use:
//
// - OutlivesProjectionEnv or OutlivesProjectionTraitDef: these would translate to
// the requirement that `'b:'r`
// - OutlivesProjectionComponent: this would require `'b:'r` in addition to other conditions
if !env_bounds.is_empty() && env_bounds[1..].iter().all(|b| *b == env_bounds[0]) {
let unique_bound = env_bounds[0];
debug!("projection_must_outlive: unique declared bound = {:?}", unique_bound);
if projection_ty.trait_ref.substs.regions()
.iter()
.any(|r| env_bounds.contains(r))
{
debug!("projection_must_outlive: unique declared bound appears in trait ref");
rcx.fcx.mk_subr(origin.clone(), region, unique_bound);
return;
}
}
// Fallback to verifying after the fact that there exists a
// declared bound, or that all the components appearing in the
    // projection outlive `region`; in some cases, this may add insufficient
// edges into the inference graph, leading to inference failures
// even though a satisfactory solution exists.
let verify_bound = projection_bound(rcx, origin.span(), env_bounds, projection_ty);
let generic = GenericKind::Projection(projection_ty);
rcx.fcx.infcx().verify_generic_bound(origin, generic.clone(), region, verify_bound);
}
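// Hedged illustration (hypothetical user-level code) of the `T::Item: 'r`
// scenario discussed above: with a declared bound in the where-clause, the
// verify path can discharge the obligation without forcing `I: 'a`.
#[allow(dead_code)]
fn demo_projection_outlives<'a, I: Iterator>(item: I::Item) -> I::Item
where
    I::Item: 'a,
{
    item
}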
fn type_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, span: Span, ty: Ty<'tcx>) -> VerifyBound {
match ty.sty {
ty::TyParam(p) => {
param_bound(rcx, p)
}
ty::TyProjection(data) => {
let declared_bounds = projection_declared_bounds(rcx, span, data);
projection_bound(rcx, span, declared_bounds, data)
}
_ => {
recursive_type_bound(rcx, span, ty)
}
}
}
fn param_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, param_ty: ty::ParamTy) -> VerifyBound {
let param_env = &rcx.infcx().parameter_environment;
debug!("param_bound(param_ty={:?})",
param_ty);
let mut param_bounds = declared_generic_bounds_from_env(rcx, GenericKind::Param(param_ty));
// Add in the default bound of fn body that applies to all in
// scope type parameters:
param_bounds.push(param_env.implicit_region_bound);
VerifyBound::AnyRegion(param_bounds)
}
fn projection_declared_bounds<'a, 'tcx>(rcx: &Rcx<'a,'tcx>,
span: Span,
projection_ty: ty::ProjectionTy<'tcx>)
-> Vec<ty::Region>
{
// First assemble bounds from where clauses and traits.
let mut declared_bounds =
declared_generic_bounds_from_env(rcx, GenericKind::Projection(projection_ty));
declared_bounds.extend_from_slice(
&declared_projection_bounds_from_trait(rcx, span, projection_ty));
declared_bounds
}
fn projection_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
span: Span,
declared_bounds: Vec<ty::Region>,
projection_ty: ty::ProjectionTy<'tcx>)
-> VerifyBound {
debug!("projection_bound(declared_bounds={:?}, projection_ty={:?})",
declared_bounds, projection_ty);
// see the extensive comment in projection_must_outlive
let ty = rcx.tcx().mk_projection(projection_ty.trait_ref, projection_ty.item_name);
let recursive_bound = recursive_type_bound(rcx, span, ty);
VerifyBound::AnyRegion(declared_bounds).or(recursive_bound)
}
fn recursive_type_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
span: Span,
ty: Ty<'tcx>)
-> VerifyBound {
let mut bounds = vec![];
for subty in ty.walk_shallow() {
bounds.push(type_bound(rcx, span, subty));
}
let mut regions = ty.regions();
regions.retain(|r| !r.is_bound()); // ignore late-bound regions
bounds.push(VerifyBound::AllRegions(regions));
// remove bounds that must hold, since they are not interesting
bounds.retain(|b| !b.must_hold());
if bounds.len() == 1 {
bounds.pop().unwrap()
} else {
VerifyBound::AllBounds(bounds)
}
}
fn declared_generic_bounds_from_env<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
generic: GenericKind<'tcx>)
-> Vec<ty::Region>
{
let param_env = &rcx.infcx().parameter_environment;
// To start, collect bounds from user:
let mut param_bounds = rcx.tcx().required_region_bounds(generic.to_ty(rcx.tcx()),
param_env.caller_bounds.clone());
// Next, collect regions we scraped from the well-formedness
// constraints in the fn signature. To do that, we walk the list
// of known relations from the fn ctxt.
//
// This is crucial because otherwise code like this fails:
//
// fn foo<'a, A>(x: &'a A) { x.bar() }
//
// The problem is that the type of `x` is `&'a A`. To be
    // well-formed, then, A must outlive `'a`, but we
// don't know that this holds from first principles.
for &(r, p) in &rcx.region_bound_pairs {
debug!("generic={:?} p={:?}",
generic,
p);
if generic == p {
param_bounds.push(r);
}
}
param_bounds
}
fn declared_projection_bounds_from_trait<'a,'tcx>(rcx: &Rcx<'a, 'tcx>,
span: Span,
projection_ty: ty::ProjectionTy<'tcx>)
-> Vec<ty::Region>
{
let fcx = rcx.fcx;
let tcx = fcx.tcx();
let infcx = fcx.infcx();
debug!("projection_bounds(projection_ty={:?})",
projection_ty);
let ty = tcx.mk_projection(projection_ty.trait_ref.clone(), projection_ty.item_name);
// Say we have a projection `<T as SomeTrait<'a>>::SomeType`. We are interested
// in looking for a trait definition like:
//
// ```
// trait SomeTrait<'a> {
// type SomeType : 'a;
// }
// ```
//
// we can thus deduce that `<T as SomeTrait<'a>>::SomeType : 'a`.
let trait_predicates = tcx.lookup_predicates(projection_ty.trait_ref.def_id);
let predicates = trait_predicates.predicates.as_slice().to_vec();
traits::elaborate_predicates(tcx, predicates)
.filter_map(|predicate| {
            // we're only interested in `T : 'a` style predicates:
let outlives = match predicate {
ty::Predicate::TypeOutlives(data) => data,
_ => { return None; }
};
debug!("projection_bounds: outlives={:?} (1)",
outlives);
// apply the substitutions (and normalize any projected types)
let outlives = fcx.instantiate_type_scheme(span,
projection_ty.trait_ref.substs,
&outlives);
debug!("projection_bounds: outlives={:?} (2)",
outlives);
let region_result = infcx.commit_if_ok(|_| {
let (outlives, _) =
infcx.replace_late_bound_regions_with_fresh_var(
span,
infer::AssocTypeProjection(projection_ty.item_name),
&outlives);
debug!("projection_bounds: outlives={:?} (3)",
outlives);
// check whether this predicate applies to our current projection
match infer::mk_eqty(infcx, false, TypeOrigin::Misc(span), ty, outlives.0) {
Ok(()) => { Ok(outlives.1) }
Err(_) => { Err(()) }
}
});
debug!("projection_bounds: region_result={:?}",
region_result);
region_result.ok()
})
.collect()
}
| 40.973133 | 99 | 0.55834 |
bfa7e932a9a90c2b8c9b855a73a832065fdb73fb | 3,345 | use super::super::app::{ActiveBlock, App, RouteId, LIBRARY_OPTIONS};
use super::common_key_events;
use termion::event::Key;
pub fn handler(key: Key, app: &mut App) {
match key {
k if common_key_events::right_event(k) => common_key_events::handle_right_event(app),
k if common_key_events::down_event(k) => {
let next_index = common_key_events::on_down_press_handler(
&LIBRARY_OPTIONS,
Some(app.library.selected_index),
);
app.library.selected_index = next_index;
}
k if common_key_events::up_event(k) => {
let next_index = common_key_events::on_up_press_handler(
&LIBRARY_OPTIONS,
Some(app.library.selected_index),
);
app.library.selected_index = next_index;
}
        // `library` should probably be an array of structs with enums rather than just using
        // indexes like this; see the `LibraryEntry` sketch after this function.
Key::Char('\n') => match app.library.selected_index {
// Made For You,
0 => {
app.push_navigation_stack(RouteId::MadeForYou, ActiveBlock::MadeForYou);
}
// Recently Played,
1 => {
if let Some(spotify) = &app.spotify {
match spotify
                    // Seems I need to clone here because `current_user_recently_played`
// consumes `self`?
.clone()
.current_user_recently_played(app.large_search_limit)
{
Ok(result) => {
app.recently_played.result = Some(result.clone());
app.current_user_saved_tracks_contains(
result
.items
.iter()
.filter_map(|item| item.track.id.clone())
.collect::<Vec<String>>(),
);
app.push_navigation_stack(
RouteId::RecentlyPlayed,
ActiveBlock::RecentlyPlayed,
);
}
Err(e) => {
app.handle_error(e);
}
}
};
}
// Liked Songs,
2 => {
app.get_current_user_saved_tracks(None);
app.push_navigation_stack(RouteId::TrackTable, ActiveBlock::TrackTable);
}
// Albums,
3 => {
app.get_current_user_saved_albums(Some(0));
app.push_navigation_stack(RouteId::AlbumList, ActiveBlock::AlbumList);
}
// Artists,
4 => {
app.get_artists(None);
app.push_navigation_stack(RouteId::Artists, ActiveBlock::Artists);
}
// Podcasts,
5 => {
app.push_navigation_stack(RouteId::Podcasts, ActiveBlock::Podcasts);
}
        // This is required because Rust can't tell if this pattern is exhaustive
_ => {}
},
_ => (),
};
}
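// Hedged sketch of the enum-based alternative suggested in the comment
// above; `LibraryEntry` and `entry_for_index` are hypothetical names
// introduced here for illustration only.
#[allow(dead_code)]
enum LibraryEntry {
    MadeForYou,
    RecentlyPlayed,
    LikedSongs,
    Albums,
    Artists,
    Podcasts,
}

#[allow(dead_code)]
fn entry_for_index(index: usize) -> Option<LibraryEntry> {
    // Mapping indexes to variants once would let `handler` match on a real
    // type instead of bare `usize` values.
    match index {
        0 => Some(LibraryEntry::MadeForYou),
        1 => Some(LibraryEntry::RecentlyPlayed),
        2 => Some(LibraryEntry::LikedSongs),
        3 => Some(LibraryEntry::Albums),
        4 => Some(LibraryEntry::Artists),
        5 => Some(LibraryEntry::Podcasts),
        _ => None,
    }
}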
| 39.352941 | 101 | 0.459791 |
d6ae283c9faa1970c56c3232f0a8d5d39cf8bc35 | 5,329 | mod auth;
mod environment;
mod graphql;
mod helpers;
mod model;
mod session;
mod sql;
use clap::Clap;
use environment::Environment;
use helpers::problem;
use hyper::server::Server;
use listenfd::ListenFd;
use std::convert::Infallible;
use std::net::SocketAddr;
use warp::Filter;
#[derive(Clap, Debug)]
#[clap(
name = "warp-api-app",
rename_all = "kebab-case",
rename_all_env = "screaming-snake"
)]
pub struct Args {
#[clap(short, long)]
debug: bool,
#[clap(required = true, short = "D", long, env)]
database_url: String,
#[clap(required = true, short = "R", long, env)]
redis_url: String,
#[clap(required = true, long, env)]
jwt_secret: String,
#[clap(required = true, long, env)]
argon_secret: String,
#[clap(long, env)]
argon_iterations: Option<u32>,
#[clap(long, env)]
argon_memory_size: Option<u32>,
#[clap(short, long, env)]
session_lifetime: Option<i64>,
#[clap(default_value = "127.0.0.1:3535", env)]
host: SocketAddr,
}
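// Invocation sketch (hypothetical values): every `env`-annotated flag can
// also be supplied through its SCREAMING_SNAKE environment variable, and
// `host` is a positional argument with a default, e.g.
//
//     DATABASE_URL=postgres://localhost/app \
//     REDIS_URL=redis://127.0.0.1 \
//     JWT_SECRET=dev-jwt ARGON_SECRET=dev-argon \
//     cargo run -- 0.0.0.0:3535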
#[tokio::main]
async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt::init();
if dotenv::dotenv().is_err() {
eprintln!("Warning: Did not find .env file in current working directory!");
}
let args = Args::parse();
let env = Environment::new(&args).await?;
let env = warp::any().map(move || env.clone());
let cors = warp::cors()
.allow_methods(vec!["GET", "POST"])
.allow_header("content-type")
.allow_header("authorization")
.allow_any_origin()
.build();
let log = warp::log("api::request");
let status = warp::path("status")
.and(warp::get())
.and(warp::path::end())
.map(|| format!("OK"));
let auth = warp::path("auth")
.and(warp::post())
.and(warp::path::end())
.and(env.clone())
.and(warp::body::json())
.and(warp::addr::remote())
.and_then(|env, req, addr| async move {
auth::filter(env, req, addr).await.map_err(problem::build)
});
let graphql = {
use futures::FutureExt as _;
use juniper_subscriptions::Coordinator;
use juniper_warp::{
make_graphql_filter, playground_filter, subscriptions::graphql_subscriptions,
};
use serde::Deserialize;
use std::sync::Arc;
use warp::Filter;
#[derive(Deserialize, Debug)]
struct Query {
csrf: Option<String>,
}
let auth = warp::header("authorization")
.or(warp::cookie("jwt"))
.unify()
.map(Some)
.or(warp::any().map(|| None))
.unify()
.and(warp::query())
.and_then(|jwt: Option<String>, query: Query| async {
if jwt.is_none() && query.csrf.is_none() {
return Ok(None);
}
if jwt.is_none() || query.csrf.is_none() {
return Err(problem::build(auth::AuthError::InvalidCredentials));
}
Ok(Some((jwt.unwrap(), query.csrf.unwrap())))
});
let context = warp::any()
.and(env.clone())
.and(auth)
.and_then(|env, auth| async {
graphql::Context::new(env, auth)
.await
.map_err(problem::build)
})
.boxed();
let coordinator = Arc::new(Coordinator::new(graphql::schema()));
let query = warp::path("query")
.and(warp::post())
.and(warp::path::end())
.and(make_graphql_filter(graphql::schema(), context.clone()));
let subscriptions = warp::path("subscriptions")
.and(warp::path::end())
.and(warp::ws())
.and(context)
.and(warp::any().map(move || Arc::clone(&coordinator)))
.map(|socket: warp::ws::Ws, context, coordinator| {
socket.on_upgrade(|socket| {
graphql_subscriptions(socket, coordinator, context)
.map(|res| {
if let Err(err) = res {
tracing::error!("websocket error: {:?}", err);
}
})
.boxed()
})
})
.map(|reply| warp::reply::with_header(reply, "Sec-WebSocket-Protocol", "graphql-ws"));
let playground = warp::path("playground")
.and(warp::path::end())
.and(playground_filter(
"/graphql/query",
Some("/graphql/subscriptions"),
));
warp::path("graphql").and(query.or(subscriptions).or(playground))
};
let svc = warp::service(
auth.or(status)
.or(graphql)
.recover(problem::unpack)
.with(cors)
.with(log),
);
let make_svc = hyper::service::make_service_fn(|_: _| {
let svc = svc.clone();
async move { Ok::<_, Infallible>(svc) }
});
let mut listenfd = ListenFd::from_env();
let server = if let Some(l) = listenfd.take_tcp_listener(0).unwrap() {
Server::from_tcp(l)?
} else {
Server::bind(&args.host)
};
server.serve(make_svc).await?;
Ok(())
}
| 29.441989 | 98 | 0.51642 |
2968e2e83dbd76458eb32609cc352a3e49d7d9a3 | 2,415 | use crate::{
Capacity, Clear, Collection, CollectionMut, CollectionRef, Get, GetMut, Iter, IterMut, Len,
PopBack, PushBack, Remove, Reserve, WithCapacity,
};
use smallvec::{Array, SmallVec};
impl<A: Array> Collection for SmallVec<A> {
type Item = A::Item;
}
impl<A: Array> CollectionRef for SmallVec<A> {
type ItemRef<'a>
where
Self: 'a,
= &'a A::Item;
crate::covariant_item_ref!();
}
impl<A: Array> CollectionMut for SmallVec<A> {
type ItemMut<'a>
where
Self: 'a,
= &'a mut A::Item;
crate::covariant_item_mut!();
}
impl<A: Array> WithCapacity for SmallVec<A> {
#[inline(always)]
fn with_capacity(capacity: usize) -> Self {
SmallVec::with_capacity(capacity)
}
}
impl<A: Array> Len for SmallVec<A> {
#[inline(always)]
fn len(&self) -> usize {
self.len()
}
#[inline(always)]
fn is_empty(&self) -> bool {
self.is_empty()
}
}
impl<A: Array> Capacity for SmallVec<A> {
#[inline(always)]
fn capacity(&self) -> usize {
self.capacity()
}
}
impl<A: Array> Reserve for SmallVec<A> {
#[inline(always)]
fn reserve(&mut self, additional: usize) {
self.reserve(additional)
}
}
impl<A: Array> Get<usize> for SmallVec<A> {
#[inline(always)]
fn get(&self, index: usize) -> Option<&A::Item> {
self.as_slice().get(index)
}
}
impl<A: Array> GetMut<usize> for SmallVec<A> {
#[inline(always)]
fn get_mut(&mut self, index: usize) -> Option<&mut A::Item> {
self.as_mut_slice().get_mut(index)
}
}
impl<A: Array> PushBack for SmallVec<A> {
type Output = ();
#[inline(always)]
fn push_back(&mut self, t: A::Item) {
self.push(t)
}
}
impl<A: Array> PopBack for SmallVec<A> {
#[inline(always)]
fn pop_back(&mut self) -> Option<A::Item> {
self.pop()
}
}
impl<A: Array> Remove<usize> for SmallVec<A> {
#[inline(always)]
fn remove(&mut self, index: usize) -> Option<A::Item> {
if index < self.len() {
Some(self.remove(index))
} else {
None
}
}
}
impl<A: Array> Clear for SmallVec<A> {
#[inline(always)]
fn clear(&mut self) {
self.clear()
}
}
impl<A: Array> Iter for SmallVec<A> {
type Iter<'a>
where
A: 'a,
= std::slice::Iter<'a, A::Item>;
#[inline(always)]
fn iter(&self) -> Self::Iter<'_> {
self.as_slice().iter()
}
}
impl<A: Array> IterMut for SmallVec<A> {
type IterMut<'a>
where
A: 'a,
= std::slice::IterMut<'a, A::Item>;
#[inline(always)]
fn iter_mut(&mut self) -> Self::IterMut<'_> {
self.as_mut_slice().iter_mut()
}
}
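// Usage sketch (hypothetical test) exercising the trait impls above;
// fully-qualified calls disambiguate from `SmallVec`'s inherent methods.
#[cfg(test)]
mod smallvec_impl_tests {
    use super::*;
    use smallvec::SmallVec;

    #[test]
    fn push_get_remove() {
        let mut v: SmallVec<[u32; 4]> = SmallVec::new();
        PushBack::push_back(&mut v, 7);
        assert_eq!(Get::get(&v, 0), Some(&7));
        assert_eq!(Remove::remove(&mut v, 0), Some(7));
        assert!(Len::is_empty(&v));
    }
}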
| 18.157895 | 92 | 0.632712 |
ddde6fc7ab9778544d31329c1a6104d029c86ab5 | 2,030 | //! An interface trait so that the rest of Youki can call
//! necessary functions without having to worry about their
//! implementation details
use std::{any::Any, ffi::OsStr, path::Path, sync::Arc};
use anyhow::Result;
use bitflags::bitflags;
use caps::{CapSet, CapsHashSet};
use nix::{
mount::MsFlags,
sched::CloneFlags,
sys::stat::{Mode, SFlag},
unistd::{Gid, Uid},
};
use oci_spec::runtime::LinuxRlimit;
use crate::syscall::{linux::LinuxSyscall, test::TestHelperSyscall};
/// This specifies various kernel/other functionalities required for
/// container management
pub trait Syscall {
fn as_any(&self) -> &dyn Any;
fn pivot_rootfs(&self, path: &Path) -> Result<()>;
fn chroot(&self, path: &Path) -> Result<()>;
fn set_ns(&self, rawfd: i32, nstype: CloneFlags) -> Result<()>;
fn set_id(&self, uid: Uid, gid: Gid) -> Result<()>;
fn unshare(&self, flags: CloneFlags) -> Result<()>;
fn set_capability(&self, cset: CapSet, value: &CapsHashSet) -> Result<()>;
fn set_hostname(&self, hostname: &str) -> Result<()>;
fn set_rlimit(&self, rlimit: &LinuxRlimit) -> Result<()>;
fn get_pwuid(&self, uid: u32) -> Option<Arc<OsStr>>;
fn mount(
&self,
source: Option<&Path>,
target: &Path,
fstype: Option<&str>,
flags: MsFlags,
data: Option<&str>,
) -> Result<()>;
fn symlink(&self, original: &Path, link: &Path) -> Result<()>;
fn mknod(&self, path: &Path, kind: SFlag, perm: Mode, dev: u64) -> Result<()>;
fn chown(&self, path: &Path, owner: Option<Uid>, group: Option<Gid>) -> Result<()>;
fn set_groups(&self, groups: &[Gid]) -> Result<()>;
fn close_range(&self, preserve_fds: i32) -> Result<()>;
}
pub fn create_syscall() -> Box<dyn Syscall> {
if cfg!(test) {
Box::new(TestHelperSyscall::default())
} else {
Box::new(LinuxSyscall)
}
}
bitflags! {
pub struct CloseRange : usize {
const NONE = 0b00000000;
const UNSHARE = 0b00000010;
const CLOEXEC = 0b00000100;
    }
}
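// Usage sketch (hypothetical): callers obtain a syscall object through the
// factory and stay agnostic of the concrete implementation; under
// `cfg(test)` this resolves to `TestHelperSyscall`, which records calls
// instead of touching the kernel.
#[allow(dead_code)]
fn demo_set_hostname() -> Result<()> {
    let syscall = create_syscall();
    syscall.set_hostname("youki-demo")
}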
| 32.741935 | 87 | 0.620197 |
9022fce191c26440380b95f544e34e9c2d44f10f | 5,434 | // main.rs
use failure::{err_msg, Fail};
use futures::{future, future::Either, Future};
use hyper::client::connect::{Destination, HttpConnector};
use tower_grpc::Request;
use tower_hyper::client::ConnectError;
use tower_hyper::{client, util};
use tower_util::MakeService;
use crate::etcdserverpb::RangeRequest;
use crate::etcdserverpb::client::Kv;
pub mod mvccpb {
include!(concat!(env!("OUT_DIR"), "/mvccpb.rs"));
}
pub mod authpb {
include!(concat!(env!("OUT_DIR"), "/authpb.rs"));
}
pub mod etcdserverpb {
include!(concat!(env!("OUT_DIR"), "/etcdserverpb.rs"));
}
type HTTPConn = tower_request_modifier::RequestModifier<
tower_hyper::client::Connection<tower_grpc::BoxBody>,
tower_grpc::BoxBody,
>;
fn main() {
let _ = ::env_logger::init();
let host = "127.0.0.1";
let port = 2379;
let key = "hello";
let uri: http::Uri = format!("http://{}:{}", host, port).parse().unwrap();
let dst = Destination::try_from_uri(uri.clone()).unwrap();
let connector = util::Connector::new(HttpConnector::new(4));
let settings = client::Builder::new().http2_only(true).clone();
let mut make_client = client::Connect::with_builder(connector, settings);
    let range_key = make_client
.make_service(dst)
.map(move |conn| {
use etcdserverpb::client::Kv;
let conn = tower_request_modifier::Builder::new()
.set_origin(uri)
.build(conn)
.unwrap();
Kv::new(conn)
})
.and_then(move |mut client| {
use etcdserverpb::RangeRequest;
client
.range(Request::new(RangeRequest {
key: key.as_bytes().to_vec(),
..Default::default()
}))
.map_err(|e| panic!("gRPC request failed; err={:?}", e))
})
.and_then(|response| {
println!("RESPONSE = {:?}", response);
Ok(())
})
.map_err(|e| {
println!("ERR = {:?}", e);
});
    tokio::run(range_key);
let run = KvClient::new(host, port)
.map_err(|e| println!("ERR = {:?}", e))
.and_then(move |mut client| {
client
.get_string(key)
.map(|resp| (client, resp))
.map_err(|e| println!("ERR = {:?}", e))
})
.and_then(|(client, resp)| {
println!("resp=> {:?}", resp);
Ok(client)
})
        .and_then(|_client| Ok(()))
.map_err(|e| println!("ERR = {:?}", e));
tokio::run(run);
}
#[derive(Debug, Fail)]
pub enum EtcdClientError {
#[fail(display = "connect error: {}", _0)]
Connect(ConnectError<std::io::Error>),
#[fail(display = "error message: {}", _0)]
ErrMsg(String),
}
struct KvClient {
inner: Kv<HTTPConn>,
}
impl KvClient {
pub fn new(host: &str, port: u16) -> impl Future<Item = KvClient, Error = EtcdClientError> {
let uri: http::Uri = match format!("http://{}:{}", host, port).parse() {
Ok(uri) => uri,
Err(e) => {
return Either::A(future::err(EtcdClientError::ErrMsg(format!(
"parse uri failed, {:?}",
e
))))
}
};
let dst = match Destination::try_from_uri(uri.clone()) {
Ok(dst) => dst,
Err(e) => {
return Either::A(future::err(EtcdClientError::ErrMsg(format!(
"build dst from uri failed, {:?}",
e
))))
}
};
let connector = util::Connector::new(HttpConnector::new(4));
let settings = client::Builder::new().http2_only(true).clone();
let mut make_client = client::Connect::with_builder(connector, settings);
Either::B(
make_client
.make_service(dst)
.map(move |conn| {
let conn = tower_request_modifier::Builder::new()
.set_origin(uri)
.build(conn)
.unwrap();
KvClient {
inner: Kv::new(conn),
}
})
.map_err(|e| EtcdClientError::ErrMsg(format!("parse uri failed, {:?}", e))),
)
}
pub fn get_bytes(
&mut self,
key: &str,
) -> impl Future<Item = Option<Vec<u8>>, Error = failure::Error> {
self.inner
.range(Request::new(RangeRequest {
key: key.as_bytes().to_vec(),
..Default::default()
}))
.map_err(|e| panic!("gRPC request failed; err={:?}", e))
.and_then(|resp| Ok(resp.into_inner().kvs.first().map(|kv| kv.value.to_vec())))
}
pub fn get_string(
&mut self,
key: &str,
) -> impl Future<Item = Option<String>, Error = failure::Error> {
self.inner
.range(Request::new(RangeRequest {
key: key.as_bytes().to_vec(),
..Default::default()
}))
.map_err(|e| panic!("gRPC request failed; err={:?}", e))
.and_then(|resp| {
Ok(resp
.into_inner()
.kvs
.first()
.map(|kv| String::from_utf8_lossy(&kv.value).to_string()))
})
}
}
| 29.857143 | 96 | 0.493191 |
e8392af8948f9952114ecf942ea25dbd58bf6bd1 | 1,921 | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_exception::Result;
use common_planners::DropUDFPlan;
use common_streams::DataBlockStream;
use common_streams::SendableDataBlockStream;
use common_tracing::tracing;
use crate::interpreters::Interpreter;
use crate::interpreters::InterpreterPtr;
use crate::sessions::QueryContext;
#[derive(Debug)]
pub struct DropUDFInterpreter {
ctx: Arc<QueryContext>,
plan: DropUDFPlan,
}
impl DropUDFInterpreter {
pub fn try_create(ctx: Arc<QueryContext>, plan: DropUDFPlan) -> Result<InterpreterPtr> {
Ok(Arc::new(DropUDFInterpreter { ctx, plan }))
}
}
#[async_trait::async_trait]
impl Interpreter for DropUDFInterpreter {
fn name(&self) -> &str {
"DropUDFInterpreter"
}
#[tracing::instrument(level = "info", skip(self, _input_stream), fields(ctx.id = self.ctx.get_id().as_str()))]
async fn execute(
&self,
_input_stream: Option<SendableDataBlockStream>,
) -> Result<SendableDataBlockStream> {
let plan = self.plan.clone();
let user_mgr = self.ctx.get_sessions_manager().get_user_manager();
user_mgr
.drop_udf(plan.name.as_str(), plan.if_exists)
.await?;
Ok(Box::pin(DataBlockStream::create(
self.plan.schema(),
None,
vec![],
)))
}
}
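// Usage sketch (hypothetical statement): this interpreter backs
//
//     DROP FUNCTION IF EXISTS my_udf;
//
// which the planner lowers into a `DropUDFPlan { if_exists: true, .. }`
// before `execute` removes the UDF through the user manager.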
| 30.492063 | 114 | 0.686101 |
f7901b3594531951249e2ddf3d25d278aaf82f40 | 15,408 | use std::convert::TryFrom;
/// The opcodes of the vm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum Opcode {
/// Pop the top value from the stack.
///
/// Operands:
///
/// Stack: value **=>**
Pop,
/// Push a copy of the top value on the stack.
///
/// Operands:
///
/// Stack: value **=>** value, value
Dup,
/// Swap the top two values on the stack.
///
/// Operands:
///
/// Stack: v1, v2 **=>** v2, v1
Swap,
/// Push integer `0` on the stack.
///
/// Operands:
///
/// Stack: **=>** 0
PushZero,
/// Push integer `1` on the stack.
///
/// Operands:
///
/// Stack: **=>** 1
PushOne,
/// Push `i8` value on the stack.
///
/// Operands: value: `i8`
///
/// Stack: **=>** value
PushInt8,
    /// Push `i16` value on the stack.
///
/// Operands: value: `i16`
///
/// Stack: **=>** value
PushInt16,
    /// Push `i32` value on the stack.
///
/// Operands: value: `i32`
///
/// Stack: **=>** value
PushInt32,
/// Push `f64` value on the stack.
///
/// Operands: value: `f64`
///
/// Stack: **=>** value
PushRational,
    /// Push `NaN` value on the stack.
///
/// Operands:
///
/// Stack: **=>** `NaN`
PushNaN,
/// Push `Infinity` value on the stack.
///
/// Operands:
///
/// Stack: **=>** `Infinity`
PushPositiveInfinity,
/// Push `-Infinity` value on the stack.
///
/// Operands:
///
/// Stack: **=>** `-Infinity`
PushNegativeInfinity,
/// Push `null` value on the stack.
///
/// Operands:
///
/// Stack: **=>** `null`
PushNull,
/// Push `true` value on the stack.
///
/// Operands:
///
/// Stack: **=>** `true`
PushTrue,
/// Push `false` value on the stack.
///
/// Operands:
///
/// Stack: **=>** `false`
PushFalse,
/// Push `undefined` value on the stack.
///
/// Operands:
///
/// Stack: **=>** `undefined`
PushUndefined,
/// Push literal value on the stack.
///
    /// Like strings and bigints. The index operand is used to index into the `literals`
/// array to get the value.
///
/// Operands: index: `u32`
///
/// Stack: **=>** (`literals[index]`)
PushLiteral,
/// Push empty object `{}` value on the stack.
///
/// Operands:
///
/// Stack: **=>** object
PushEmptyObject,
    /// Pop `n` values from the stack and push an array containing them.
    ///
    /// Operands: n: `u32`
    ///
    /// Stack: v1, v2, ... vn **=>** [v1, v2, ..., vn]
PushNewArray,
/// Binary `+` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs + rhs)
Add,
/// Binary `-` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs - rhs)
Sub,
/// Binary `/` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs / rhs)
Div,
/// Binary `*` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs * rhs)
Mul,
/// Binary `%` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs % rhs)
Mod,
/// Binary `**` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs ** rhs)
Pow,
/// Binary `>>` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs >> rhs)
ShiftRight,
/// Binary `<<` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs << rhs)
ShiftLeft,
/// Binary `>>>` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs >>> rhs)
UnsignedShiftRight,
/// Binary bitwise `|` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs | rhs)
BitOr,
/// Binary bitwise `&` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs & rhs)
BitAnd,
/// Binary bitwise `^` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs ^ rhs)
BitXor,
/// Unary bitwise `~` operator.
///
/// Operands:
///
/// Stack: value **=>** ~value
BitNot,
/// Binary `in` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs `in` rhs)
In,
/// Binary `==` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs `==` rhs)
Eq,
/// Binary `===` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs `===` rhs)
StrictEq,
/// Binary `!=` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs `!=` rhs)
NotEq,
/// Binary `!==` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs `!==` rhs)
StrictNotEq,
/// Binary `>` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs > rhs)
GreaterThan,
/// Binary `>=` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs >= rhs)
GreaterThanOrEq,
/// Binary `<` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs < rhs)
LessThan,
/// Binary `<=` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs <= rhs)
LessThanOrEq,
/// Binary `instanceof` operator.
///
/// Operands:
///
/// Stack: lhs, rhs **=>** (lhs instanceof rhs)
InstanceOf,
/// Binary logical `&&` operator.
///
    /// This is a short-circuit operator: if the `lhs` value is `false`, it jumps to the `exit` address.
///
/// Operands: exit: `u32`
///
/// Stack: lhs, rhs **=>** (lhs && rhs)
LogicalAnd,
/// Binary logical `||` operator.
///
    /// This is a short-circuit operator: if the `lhs` value is `true`, it jumps to the `exit` address.
///
/// Operands: exit: `u32`
///
/// Stack: lhs, rhs **=>** (lhs || rhs)
LogicalOr,
/// Binary `??` operator.
///
    /// This is a short-circuit operator: if the `lhs` value is **not** `null` or `undefined`,
    /// it jumps to the `exit` address.
///
/// Operands: exit: `u32`
///
    /// Stack: lhs, rhs **=>** (lhs ?? rhs)
Coalesce,
/// Unary `typeof` operator.
///
/// Operands:
///
/// Stack: value **=>** (`typeof` value)
TypeOf,
/// Unary `void` operator.
///
/// Operands:
///
/// Stack: value **=>** `undefined`
Void,
/// Unary logical `!` operator.
///
/// Operands:
///
/// Stack: value **=>** (!value)
LogicalNot,
/// Unary `+` operator.
///
/// Operands:
///
/// Stack: value **=>** (+value)
Pos,
/// Unary `-` operator.
///
/// Operands:
///
/// Stack: value **=>** (-value)
Neg,
    /// Declare a `var` type variable.
///
/// Operands: name_index: `u32`
///
/// Stack: **=>**
DefVar,
    /// Declare a `let` type variable.
///
/// Operands: name_index: `u32`
///
/// Stack: **=>**
DefLet,
    /// Declare a `const` type variable.
///
/// Operands: name_index: `u32`
///
/// Stack: **=>**
DefConst,
/// Initialize a lexical binding.
///
/// Operands: name_index: `u32`
///
/// Stack: **=>**
InitLexical,
/// Find a binding on the environment chain and push its value.
///
/// Operands: name_index: `u32`
///
/// Stack: **=>** value
GetName,
/// Find a binding on the environment chain and assign its value.
///
/// Operands: name_index: `u32`
///
/// Stack: value **=>**
SetName,
/// Get a property by name from an object an push it on the stack.
///
/// Like `object.name`
///
/// Operands: name_index: `u32`
///
/// Stack: object **=>** value
GetPropertyByName,
/// Get a property by value from an object an push it on the stack.
///
/// Like `object[key]`
///
/// Operands:
///
/// Stack: key, object **=>** value
GetPropertyByValue,
/// Sets a property by name of an object.
///
/// Like `object.name = value`
///
/// Operands: name_index: `u32`
///
/// Stack: value, object **=>**
SetPropertyByName,
/// Sets a property by value of an object.
///
/// Like `object[key] = value`
///
/// Operands:
///
/// Stack: value, key, object **=>**
SetPropertyByValue,
/// Deletes a property by name of an object.
///
/// Like `delete object.key.`
///
/// Operands: name_index: `u32`
///
/// Stack: object **=>**
DeletePropertyByName,
/// Deletes a property by value of an object.
///
/// Like `delete object[key]`
///
/// Operands:
///
/// Stack: key, object **=>**
DeletePropertyByValue,
/// Unconditional jump to address.
///
/// Operands: address: `u32`
/// Stack: **=>**
Jump,
    /// Conditional jump to address.
///
/// If the value popped is [`falsy`][falsy] then jump to `address`.
///
/// Operands: address: `u32`
///
/// Stack: cond **=>**
///
/// [falsy]: https://developer.mozilla.org/en-US/docs/Glossary/Falsy
JumpIfFalse,
    /// Conditional jump to address.
///
/// If the value popped is [`truthy`][truthy] then jump to `address`.
///
/// Operands: address: `u32`
///
/// Stack: cond **=>**
///
/// [truthy]: https://developer.mozilla.org/en-US/docs/Glossary/Truthy
JumpIfTrue,
/// Throw exception
///
/// Operands:
///
/// Stack: `exc` **=>**
Throw,
    /// Pops a value, converts it to boolean, and pushes it back.
///
/// Operands:
///
/// Stack: value **=>** (`ToBoolean(value)`)
ToBoolean,
/// Pushes `this` value
///
/// Operands:
///
/// Stack: **=>** `this`
This,
    /// Pop the top two values off the stack and compare them with strict equality;
    /// if they are equal, jump to `address`, otherwise push the second popped value back.
///
/// Operands: address: `u32`
///
/// Stack: `value`, `cond` **=>** `cond` (if `cond !== value`).
Case,
    /// Pops the top of the stack and jumps to `address`.
///
/// Operands: address: `u32`
///
/// Stack: `value` **=>**
Default,
/// Get function from the precompiled inner functions.
///
/// Operands: address: `u32`
///
/// Stack: **=>** `func`
GetFunction,
/// Call a function.
///
/// Operands: argc: `u32`
///
/// Stack: `func`, `this`, `arg1`, `arg2`,...`argn` **=>**
Call,
/// Return from a function.
Return,
/// No-operation instruction, does nothing.
///
/// Operands:
///
/// Stack: **=>**
    // Safety: Must be last in the list, since we use this for range checking
// in TryFrom<u8> impl.
Nop,
}
impl Opcode {
/// Create opcode from `u8` byte.
///
/// # Safety
///
    /// Does not check that the `u8` value is a valid `Opcode`.
pub unsafe fn from_raw(value: u8) -> Self {
std::mem::transmute(value)
}
pub fn as_str(self) -> &'static str {
match self {
Opcode::Pop => "Pop",
Opcode::Dup => "Dup",
Opcode::Swap => "Swap",
Opcode::PushZero => "PushZero",
Opcode::PushOne => "PushOne",
Opcode::PushInt8 => "PushInt8",
Opcode::PushInt16 => "PushInt16",
Opcode::PushInt32 => "PushInt32",
Opcode::PushRational => "PushRational",
Opcode::PushNaN => "PushNaN",
Opcode::PushPositiveInfinity => "PushPositiveInfinity",
Opcode::PushNegativeInfinity => "PushNegativeInfinity",
Opcode::PushNull => "PushNull",
Opcode::PushTrue => "PushTrue",
Opcode::PushFalse => "PushFalse",
Opcode::PushUndefined => "PushUndefined",
Opcode::PushLiteral => "PushLiteral",
Opcode::PushEmptyObject => "PushEmptyObject",
Opcode::PushNewArray => "PushNewArray",
Opcode::Add => "Add",
Opcode::Sub => "Sub",
Opcode::Div => "Div",
Opcode::Mul => "Mul",
Opcode::Mod => "Mod",
Opcode::Pow => "Pow",
Opcode::ShiftRight => "ShiftRight",
Opcode::ShiftLeft => "ShiftLeft",
Opcode::UnsignedShiftRight => "UnsignedShiftRight",
Opcode::BitOr => "BitOr",
Opcode::BitAnd => "BitAnd",
Opcode::BitXor => "BitXor",
Opcode::BitNot => "BitNot",
Opcode::In => "In",
Opcode::Eq => "Eq",
Opcode::StrictEq => "StrictEq",
Opcode::NotEq => "NotEq",
Opcode::StrictNotEq => "StrictNotEq",
Opcode::GreaterThan => "GreaterThan",
Opcode::GreaterThanOrEq => "GreaterThanOrEq",
Opcode::LessThan => "LessThan",
Opcode::LessThanOrEq => "LessThanOrEq",
Opcode::InstanceOf => "InstanceOf",
Opcode::TypeOf => "TypeOf",
Opcode::Void => "Void",
Opcode::LogicalNot => "LogicalNot",
Opcode::LogicalAnd => "LogicalAnd",
Opcode::LogicalOr => "LogicalOr",
Opcode::Coalesce => "Coalesce",
Opcode::Pos => "Pos",
Opcode::Neg => "Neg",
Opcode::DefVar => "DefVar",
Opcode::DefLet => "DefLet",
Opcode::DefConst => "DefConst",
Opcode::InitLexical => "InitLexical",
Opcode::GetName => "GetName",
Opcode::SetName => "SetName",
Opcode::GetPropertyByName => "GetPropertyByName",
Opcode::GetPropertyByValue => "GetPropertyByValue",
Opcode::SetPropertyByName => "SetPropertyByName",
Opcode::SetPropertyByValue => "SetPropertyByValue",
Opcode::DeletePropertyByName => "DeletePropertyByName",
Opcode::DeletePropertyByValue => "DeletePropertyByValue",
Opcode::Jump => "Jump",
Opcode::JumpIfFalse => "JumpIfFalse",
Opcode::JumpIfTrue => "JumpIfTrue",
Opcode::Throw => "Throw",
Opcode::ToBoolean => "ToBoolean",
Opcode::This => "This",
Opcode::Case => "Case",
Opcode::Default => "Default",
Opcode::GetFunction => "GetFunction",
Opcode::Call => "Call",
Opcode::Return => "Return",
Opcode::Nop => "Nop",
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct InvalidOpcodeError {
value: u8,
}
impl std::fmt::Display for InvalidOpcodeError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "invalid opcode: {:#04x}", self.value)
}
}
impl std::error::Error for InvalidOpcodeError {}
impl TryFrom<u8> for Opcode {
type Error = InvalidOpcodeError;
fn try_from(value: u8) -> Result<Self, Self::Error> {
if value > Self::Nop as u8 {
return Err(InvalidOpcodeError { value });
}
// Safety: we already checked if it is in the Opcode range,
// so this is safe.
let opcode = unsafe { Self::from_raw(value) };
Ok(opcode)
}
}
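// Usage sketch (hypothetical test): round-tripping an opcode through its
// byte representation with the fallible conversion above.
#[cfg(test)]
mod opcode_conversion_tests {
    use super::*;
    use std::convert::TryFrom;

    #[test]
    fn roundtrip_and_reject() {
        let byte = Opcode::Add as u8;
        assert_eq!(Opcode::try_from(byte), Ok(Opcode::Add));
        // Everything past `Nop` must be rejected.
        assert!(Opcode::try_from(Opcode::Nop as u8 + 1).is_err());
    }
}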
| 22.928571 | 104 | 0.47722 |
e2c239d6da11c363a48a8aff96bdf3f5c683e1c3 | 3,981 | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Mutex;
use std::time::{Duration, Instant};
use kvproto::pdpb::*;
use protobuf::RepeatedField;
use super::*;
pub const LEADER_INTERVAL_SEC: u64 = 2;
#[derive(Debug)]
struct Roulette {
ts: Instant,
idx: usize,
}
#[derive(Debug)]
struct Inner {
resps: Vec<GetMembersResponse>,
r: Roulette,
}
#[derive(Debug)]
pub struct LeaderChange {
inner: Mutex<Inner>,
}
impl LeaderChange {
pub fn new() -> LeaderChange {
LeaderChange {
inner: Mutex::new(Inner {
resps: vec![],
r: Roulette {
ts: Instant::now(),
idx: 0,
},
}),
}
}
pub fn get_leader_interval() -> Duration {
Duration::from_secs(LEADER_INTERVAL_SEC)
}
}
const DEAD_ID: u64 = 1000;
const DEAD_NAME: &str = "walking_dead";
const DEAD_URL: &str = "127.0.0.1:65534";
impl PdMocker for LeaderChange {
fn get_members(&self, _: &GetMembersRequest) -> Option<Result<GetMembersResponse>> {
let mut inner = self.inner.lock().unwrap();
let now = Instant::now();
if now - inner.r.ts > LeaderChange::get_leader_interval() {
inner.r.idx += 1;
inner.r.ts = now;
return Some(Err("not leader".to_owned()));
}
info!(
"[LeaderChange] get_members: {:?}",
inner.resps[inner.r.idx % inner.resps.len()]
);
Some(Ok(inner.resps[inner.r.idx % inner.resps.len()].clone()))
}
fn get_region_by_id(&self, _: &GetRegionByIDRequest) -> Option<Result<GetRegionResponse>> {
let mut inner = self.inner.lock().unwrap();
let now = Instant::now();
if now.duration_since(inner.r.ts) > LeaderChange::get_leader_interval() {
inner.r.idx += 1;
inner.r.ts = now;
debug!(
"[LeaderChange] change leader to {:?}",
inner.resps[inner.r.idx % inner.resps.len()].get_leader()
);
}
Some(Err("not leader".to_owned()))
}
fn set_endpoints(&self, eps: Vec<String>) {
let mut members = Vec::with_capacity(eps.len());
        for (i, ep) in eps.iter().enumerate() {
let mut m = Member::new();
m.set_name(format!("pd{}", i));
m.set_member_id(100 + i as u64);
m.set_client_urls(RepeatedField::from_vec(vec![ep.to_owned()]));
m.set_peer_urls(RepeatedField::from_vec(vec![ep.to_owned()]));
members.push(m);
}
// A dead PD
let mut m = Member::new();
m.set_member_id(DEAD_ID);
m.set_name(DEAD_NAME.to_owned());
m.set_client_urls(RepeatedField::from_vec(vec![DEAD_URL.to_owned()]));
m.set_peer_urls(RepeatedField::from_vec(vec![DEAD_URL.to_owned()]));
members.push(m);
let mut header = ResponseHeader::new();
header.set_cluster_id(1);
let mut resps = Vec::with_capacity(eps.len());
        for i in 0..eps.len() {
let mut resp = GetMembersResponse::new();
resp.set_header(header.clone());
resp.set_members(RepeatedField::from_vec(members.clone()));
resp.set_leader(members[i].clone());
resps.push(resp);
}
info!("[LeaerChange] set_endpoints {:?}", resps);
let mut inner = self.inner.lock().unwrap();
inner.resps = resps;
}
}
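// A minimal usage sketch (hypothetical test, not part of the original file),
// assuming the kvproto/pdpb types and the PdMocker trait glob-imported above:
// immediately after set_endpoints, and before LEADER_INTERVAL_SEC elapses,
// get_members reports the first configured endpoint ("pd0") as leader.
#[cfg(test)]
mod leader_change_sketch {
    use super::*;

    #[test]
    fn first_leader_is_pd0() {
        let lc = LeaderChange::new();
        lc.set_endpoints(vec![
            "127.0.0.1:2379".to_owned(),
            "127.0.0.1:2380".to_owned(),
        ]);
        // The roulette index is still 0, so resps[0] is returned and its
        // leader is the first member, named "pd0" by set_endpoints.
        let resp = lc.get_members(&GetMembersRequest::new()).unwrap().unwrap();
        assert_eq!(resp.get_leader().get_name(), "pd0");
    }
}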
| 30.389313 | 95 | 0.577744 |
b94a7f876ab0c854b651bbd684cb97d75ead5c84 | 8,706 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
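// Every serializer below follows the same generated shape: open a JSON object
// writer over a fresh String, let the matching json_ser helper emit the input
// struct's fields, finish the object, and wrap the buffer in an SdkBody for
// the HTTP layer.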
pub fn serialize_operation_crate_operation_accept_invitation(
input: &crate::input::AcceptInvitationInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_accept_invitation_input(&mut object, input)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_create_graph(
input: &crate::input::CreateGraphInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_create_graph_input(&mut object, input)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_create_members(
input: &crate::input::CreateMembersInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_create_members_input(&mut object, input)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_delete_graph(
input: &crate::input::DeleteGraphInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_delete_graph_input(&mut object, input)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_delete_members(
input: &crate::input::DeleteMembersInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_delete_members_input(&mut object, input)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_describe_organization_configuration(
input: &crate::input::DescribeOrganizationConfigurationInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_describe_organization_configuration_input(
&mut object,
input,
)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_disassociate_membership(
input: &crate::input::DisassociateMembershipInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_disassociate_membership_input(
&mut object,
input,
)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_enable_organization_admin_account(
input: &crate::input::EnableOrganizationAdminAccountInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_enable_organization_admin_account_input(
&mut object,
input,
)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_get_members(
input: &crate::input::GetMembersInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_get_members_input(&mut object, input)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_list_graphs(
input: &crate::input::ListGraphsInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_list_graphs_input(&mut object, input)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_list_invitations(
input: &crate::input::ListInvitationsInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_list_invitations_input(&mut object, input)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_list_members(
input: &crate::input::ListMembersInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_list_members_input(&mut object, input)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_list_organization_admin_accounts(
input: &crate::input::ListOrganizationAdminAccountsInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_list_organization_admin_accounts_input(
&mut object,
input,
)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_reject_invitation(
input: &crate::input::RejectInvitationInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_reject_invitation_input(&mut object, input)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_start_monitoring_member(
input: &crate::input::StartMonitoringMemberInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_start_monitoring_member_input(
&mut object,
input,
)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_tag_resource(
input: &crate::input::TagResourceInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_tag_resource_input(&mut object, input)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
pub fn serialize_operation_crate_operation_update_organization_configuration(
input: &crate::input::UpdateOrganizationConfigurationInput,
) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> {
let mut out = String::new();
let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out);
crate::json_ser::serialize_structure_crate_input_update_organization_configuration_input(
&mut object,
input,
)?;
object.finish();
Ok(aws_smithy_http::body::SdkBody::from(out))
}
| 46.063492 | 98 | 0.753733 |
509ea5012066eb1ec87811dd33fe387a08299fd7 | 158 | #[test]
fn ui() {
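    // trybuild compiles each listed file at test time: `pass` cases must
    // build cleanly, while `compile_fail` cases must fail to compile (and
    // match a checked-in .stderr snapshot, when one exists).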
let t = trybuild::TestCases::new();
t.pass("tests/ui/01-api-sanity-check.rs");
t.compile_fail("tests/ui/02-invalid-path.rs");
}
| 22.571429 | 50 | 0.613924 |
1e9ec229aa304a230d1fb5f2c41f61184cf643cd | 746 | use crate::fs::DirEntry;
use crate::wasi::types;
/// Iterator over the entries in a directory.
///
/// This corresponds to [`std::fs::ReadDir`].
///
/// TODO: Not yet implemented.
///
/// [`std::fs::ReadDir`]: https://doc.rust-lang.org/std/fs/struct.ReadDir.html
pub struct ReadDir {
fd: types::Fd,
}
impl ReadDir {
/// Constructs a new instance of `Self` from the given raw WASI file descriptor.
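    ///
    /// # Safety
    ///
    /// `fd` must be a valid, open directory file descriptor, and the caller
    /// must ensure this `ReadDir` becomes its sole owner.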
pub unsafe fn from_raw_wasi_fd(fd: types::Fd) -> Self {
Self { fd }
}
}
/// TODO: Not yet implemented.
impl Iterator for ReadDir {
type Item = DirEntry;
/// TODO: Not yet implemented.
fn next(&mut self) -> Option<Self::Item> {
unimplemented!("ReadDir::next");
}
}
// TODO: impl Debug for ReadDir
| 22.606061 | 84 | 0.628686 |
de7eeb57de10d7143351c8b9efc6dc91b6a12e78 | 1,084 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// This code used to produce the following ICE:
//
// error: internal compiler error: get_unique_type_id_of_type() -
// unexpected type: closure,
// ty_closure(syntax::ast::DefId{krate: 0, node: 66},
// ReScope(63))
//
// This is a regression test for issue #17021.
//
// compile-flags: -g
#![feature(unboxed_closures)]
use std::ptr;
pub fn replace_map<'a, T, F>(src: &mut T, prod: F) where F: FnOnce(T) -> T {
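    // `ptr::read` moves the value out of `*src` without running its
    // destructor; the plain assignment then drops the stale copy still
    // sitting in `*src`. That makes this sound only for types without a
    // meaningful Drop impl (like the usize below) and for closures that do
    // not panic mid-call.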
unsafe { *src = prod(ptr::read(src as *mut T as *const T)); }
}
pub fn main() {
let mut a = 7;
let b = &mut a;
replace_map(b, |x: usize| x * 2);
assert_eq!(*b, 14);
}
| 29.297297 | 76 | 0.664207 |
b9639b35fa3ee23d2539ca862d9bac139ec0767b | 79,366 | use {
crate::nonce_keyed_account::NonceKeyedAccount,
log::*,
solana_program_runtime::{ic_msg, invoke_context::InvokeContext},
solana_sdk::{
account::{AccountSharedData, ReadableAccount, WritableAccount},
account_utils::StateMut,
feature_set,
instruction::InstructionError,
keyed_account::{from_keyed_account, get_signers, keyed_account_at_index, KeyedAccount},
nonce,
program_utils::limited_deserialize,
pubkey::Pubkey,
system_instruction::{
NonceError, SystemError, SystemInstruction, MAX_PERMITTED_DATA_LENGTH,
},
system_program,
sysvar::{self, rent::Rent},
},
std::collections::HashSet,
};
// represents an address that may or may not have been generated
// from a seed
#[derive(PartialEq, Default, Debug)]
struct Address {
address: Pubkey,
base: Option<Pubkey>,
}
impl Address {
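    // A seed-derived address cannot sign directly; the base key's signature
    // authorizes it instead, so the signer check falls back to `base` when
    // one is present.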
fn is_signer(&self, signers: &HashSet<Pubkey>) -> bool {
if let Some(base) = self.base {
signers.contains(&base)
} else {
signers.contains(&self.address)
}
}
fn create(
address: &Pubkey,
with_seed: Option<(&Pubkey, &str, &Pubkey)>,
invoke_context: &InvokeContext,
) -> Result<Self, InstructionError> {
let base = if let Some((base, seed, owner)) = with_seed {
let address_with_seed = Pubkey::create_with_seed(base, seed, owner)?;
// re-derive the address, must match the supplied address
if *address != address_with_seed {
ic_msg!(
invoke_context,
"Create: address {} does not match derived address {}",
address,
address_with_seed
);
return Err(SystemError::AddressWithSeedMismatch.into());
}
Some(*base)
} else {
None
};
Ok(Self {
address: *address,
base,
})
}
}
fn allocate(
account: &mut AccountSharedData,
address: &Address,
space: u64,
signers: &HashSet<Pubkey>,
invoke_context: &InvokeContext,
) -> Result<(), InstructionError> {
if !address.is_signer(signers) {
ic_msg!(
invoke_context,
"Allocate: 'to' account {:?} must sign",
address
);
return Err(InstructionError::MissingRequiredSignature);
}
// if it looks like the `to` account is already in use, bail
// (note that the id check is also enforced by message_processor)
if !account.data().is_empty() || !system_program::check_id(account.owner()) {
ic_msg!(
invoke_context,
"Allocate: account {:?} already in use",
address
);
return Err(SystemError::AccountAlreadyInUse.into());
}
if space > MAX_PERMITTED_DATA_LENGTH {
ic_msg!(
invoke_context,
"Allocate: requested {}, max allowed {}",
space,
MAX_PERMITTED_DATA_LENGTH
);
return Err(SystemError::InvalidAccountDataLength.into());
}
account.set_data(vec![0; space as usize]);
Ok(())
}
fn assign(
account: &mut AccountSharedData,
address: &Address,
owner: &Pubkey,
signers: &HashSet<Pubkey>,
invoke_context: &InvokeContext,
) -> Result<(), InstructionError> {
// no work to do, just return
if account.owner() == owner {
return Ok(());
}
if !address.is_signer(signers) {
ic_msg!(invoke_context, "Assign: account {:?} must sign", address);
return Err(InstructionError::MissingRequiredSignature);
}
    // BPF programs are allowed to do this, so the restriction here is
    // inconsistent. We're therefore starting to remove it from the system
    // instruction processor, for consistency and less special-casing, by
    // piggybacking onto the related feature gate.
let rent_for_sysvars = invoke_context
.feature_set
.is_active(&feature_set::rent_for_sysvars::id());
if !rent_for_sysvars && sysvar::check_id(owner) {
// guard against sysvars being made
ic_msg!(invoke_context, "Assign: cannot assign to sysvar, {}", owner);
return Err(SystemError::InvalidProgramId.into());
}
account.set_owner(*owner);
Ok(())
}
fn allocate_and_assign(
to: &mut AccountSharedData,
to_address: &Address,
space: u64,
owner: &Pubkey,
signers: &HashSet<Pubkey>,
invoke_context: &InvokeContext,
) -> Result<(), InstructionError> {
allocate(to, to_address, space, signers, invoke_context)?;
assign(to, to_address, owner, signers, invoke_context)
}
fn create_account(
from: &KeyedAccount,
to: &KeyedAccount,
to_address: &Address,
lamports: u64,
space: u64,
owner: &Pubkey,
signers: &HashSet<Pubkey>,
invoke_context: &InvokeContext,
) -> Result<(), InstructionError> {
// if it looks like the `to` account is already in use, bail
{
let to = &mut to.try_account_ref_mut()?;
if to.lamports() > 0 {
ic_msg!(
invoke_context,
"Create Account: account {:?} already in use",
to_address
);
return Err(SystemError::AccountAlreadyInUse.into());
}
allocate_and_assign(to, to_address, space, owner, signers, invoke_context)?;
}
transfer(from, to, lamports, invoke_context)
}
fn transfer_verified(
from: &KeyedAccount,
to: &KeyedAccount,
lamports: u64,
invoke_context: &InvokeContext,
) -> Result<(), InstructionError> {
if !from.data_is_empty()? {
ic_msg!(invoke_context, "Transfer: `from` must not carry data");
return Err(InstructionError::InvalidArgument);
}
if lamports > from.lamports()? {
ic_msg!(
invoke_context,
"Transfer: insufficient lamports {}, need {}",
from.lamports()?,
lamports
);
return Err(SystemError::ResultWithNegativeLamports.into());
}
from.try_account_ref_mut()?.checked_sub_lamports(lamports)?;
to.try_account_ref_mut()?.checked_add_lamports(lamports)?;
Ok(())
}
fn transfer(
from: &KeyedAccount,
to: &KeyedAccount,
lamports: u64,
invoke_context: &InvokeContext,
) -> Result<(), InstructionError> {
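    // Before the system_transfer_zero_check feature, zero-lamport transfers
    // succeeded unconditionally (even without the `from` signature check
    // below), so that behavior is preserved until the feature activates.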
if !invoke_context
.feature_set
.is_active(&feature_set::system_transfer_zero_check::id())
&& lamports == 0
{
return Ok(());
}
if from.signer_key().is_none() {
ic_msg!(
invoke_context,
"Transfer: `from` account {} must sign",
from.unsigned_key()
);
return Err(InstructionError::MissingRequiredSignature);
}
transfer_verified(from, to, lamports, invoke_context)
}
fn transfer_with_seed(
from: &KeyedAccount,
from_base: &KeyedAccount,
from_seed: &str,
from_owner: &Pubkey,
to: &KeyedAccount,
lamports: u64,
invoke_context: &InvokeContext,
) -> Result<(), InstructionError> {
if !invoke_context
.feature_set
.is_active(&feature_set::system_transfer_zero_check::id())
&& lamports == 0
{
return Ok(());
}
if from_base.signer_key().is_none() {
ic_msg!(
invoke_context,
"Transfer: 'from' account {:?} must sign",
from_base
);
return Err(InstructionError::MissingRequiredSignature);
}
let address_from_seed =
Pubkey::create_with_seed(from_base.unsigned_key(), from_seed, from_owner)?;
if *from.unsigned_key() != address_from_seed {
ic_msg!(
invoke_context,
"Transfer: 'from' address {} does not match derived address {}",
from.unsigned_key(),
address_from_seed
);
return Err(SystemError::AddressWithSeedMismatch.into());
}
transfer_verified(from, to, lamports, invoke_context)
}
pub fn process_instruction(
first_instruction_account: usize,
instruction_data: &[u8],
invoke_context: &mut InvokeContext,
) -> Result<(), InstructionError> {
let keyed_accounts = invoke_context.get_keyed_accounts()?;
let instruction = limited_deserialize(instruction_data)?;
trace!("process_instruction: {:?}", instruction);
trace!("keyed_accounts: {:?}", keyed_accounts);
let _ = keyed_account_at_index(keyed_accounts, first_instruction_account)?;
let signers = get_signers(&keyed_accounts[first_instruction_account..]);
match instruction {
SystemInstruction::CreateAccount {
lamports,
space,
owner,
} => {
let from = keyed_account_at_index(keyed_accounts, first_instruction_account)?;
let to = keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?;
let to_address = Address::create(to.unsigned_key(), None, invoke_context)?;
create_account(
from,
to,
&to_address,
lamports,
space,
&owner,
&signers,
invoke_context,
)
}
SystemInstruction::CreateAccountWithSeed {
base,
seed,
lamports,
space,
owner,
} => {
let from = keyed_account_at_index(keyed_accounts, first_instruction_account)?;
let to = keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?;
let to_address = Address::create(
to.unsigned_key(),
Some((&base, &seed, &owner)),
invoke_context,
)?;
create_account(
from,
to,
&to_address,
lamports,
space,
&owner,
&signers,
invoke_context,
)
}
SystemInstruction::Assign { owner } => {
let keyed_account = keyed_account_at_index(keyed_accounts, first_instruction_account)?;
let mut account = keyed_account.try_account_ref_mut()?;
let address = Address::create(keyed_account.unsigned_key(), None, invoke_context)?;
assign(&mut account, &address, &owner, &signers, invoke_context)
}
SystemInstruction::Transfer { lamports } => {
let from = keyed_account_at_index(keyed_accounts, first_instruction_account)?;
let to = keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?;
transfer(from, to, lamports, invoke_context)
}
SystemInstruction::TransferWithSeed {
lamports,
from_seed,
from_owner,
} => {
let from = keyed_account_at_index(keyed_accounts, first_instruction_account)?;
let base = keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?;
let to = keyed_account_at_index(keyed_accounts, first_instruction_account + 2)?;
transfer_with_seed(
from,
base,
&from_seed,
&from_owner,
to,
lamports,
invoke_context,
)
}
SystemInstruction::AdvanceNonceAccount => {
let me = &mut keyed_account_at_index(keyed_accounts, first_instruction_account)?;
#[allow(deprecated)]
if from_keyed_account::<solana_sdk::sysvar::recent_blockhashes::RecentBlockhashes>(
keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?,
)?
.is_empty()
{
ic_msg!(
invoke_context,
"Advance nonce account: recent blockhash list is empty",
);
return Err(NonceError::NoRecentBlockhashes.into());
}
me.advance_nonce_account(&signers, invoke_context)
}
SystemInstruction::WithdrawNonceAccount(lamports) => {
let me = &mut keyed_account_at_index(keyed_accounts, first_instruction_account)?;
let to = &mut keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?;
#[allow(deprecated)]
let _ = from_keyed_account::<solana_sdk::sysvar::recent_blockhashes::RecentBlockhashes>(
keyed_account_at_index(keyed_accounts, first_instruction_account + 2)?,
)?;
me.withdraw_nonce_account(
lamports,
to,
&from_keyed_account::<Rent>(keyed_account_at_index(
keyed_accounts,
first_instruction_account + 3,
)?)?,
&signers,
invoke_context,
)
}
SystemInstruction::InitializeNonceAccount(authorized) => {
let me = &mut keyed_account_at_index(keyed_accounts, first_instruction_account)?;
#[allow(deprecated)]
if from_keyed_account::<solana_sdk::sysvar::recent_blockhashes::RecentBlockhashes>(
keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?,
)?
.is_empty()
{
ic_msg!(
invoke_context,
"Initialize nonce account: recent blockhash list is empty",
);
return Err(NonceError::NoRecentBlockhashes.into());
}
me.initialize_nonce_account(
&authorized,
&from_keyed_account::<Rent>(keyed_account_at_index(
keyed_accounts,
first_instruction_account + 2,
)?)?,
invoke_context,
)
}
SystemInstruction::AuthorizeNonceAccount(nonce_authority) => {
let me = &mut keyed_account_at_index(keyed_accounts, first_instruction_account)?;
me.authorize_nonce_account(&nonce_authority, &signers, invoke_context)
}
SystemInstruction::Allocate { space } => {
let keyed_account = keyed_account_at_index(keyed_accounts, first_instruction_account)?;
let mut account = keyed_account.try_account_ref_mut()?;
let address = Address::create(keyed_account.unsigned_key(), None, invoke_context)?;
allocate(&mut account, &address, space, &signers, invoke_context)
}
SystemInstruction::AllocateWithSeed {
base,
seed,
space,
owner,
} => {
let keyed_account = keyed_account_at_index(keyed_accounts, first_instruction_account)?;
let mut account = keyed_account.try_account_ref_mut()?;
let address = Address::create(
keyed_account.unsigned_key(),
Some((&base, &seed, &owner)),
invoke_context,
)?;
allocate_and_assign(
&mut account,
&address,
space,
&owner,
&signers,
invoke_context,
)
}
SystemInstruction::AssignWithSeed { base, seed, owner } => {
let keyed_account = keyed_account_at_index(keyed_accounts, first_instruction_account)?;
let mut account = keyed_account.try_account_ref_mut()?;
let address = Address::create(
keyed_account.unsigned_key(),
Some((&base, &seed, &owner)),
invoke_context,
)?;
assign(&mut account, &address, &owner, &signers, invoke_context)
}
}
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum SystemAccountKind {
System,
Nonce,
}
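/// Classifies a system-owned account by its data: empty data means a plain
/// system account, while data of exactly `nonce::State::size()` bytes is
/// treated as a nonce account if it holds an initialized nonce state.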
pub fn get_system_account_kind(account: &AccountSharedData) -> Option<SystemAccountKind> {
if system_program::check_id(account.owner()) {
if account.data().is_empty() {
Some(SystemAccountKind::System)
} else if account.data().len() == nonce::State::size() {
match account.state().ok()? {
nonce::state::Versions::Current(state) => match *state {
nonce::State::Initialized(_) => Some(SystemAccountKind::Nonce),
_ => None,
},
}
} else {
None
}
} else {
None
}
}
#[cfg(test)]
mod tests {
#[allow(deprecated)]
use solana_sdk::{
account::{self, Account, AccountSharedData},
client::SyncClient,
feature_set::FeatureSet,
genesis_config::create_genesis_config,
hash::{hash, Hash},
instruction::{AccountMeta, Instruction, InstructionError},
message::Message,
nonce, nonce_account, recent_blockhashes_account,
signature::{Keypair, Signer},
system_instruction, system_program, sysvar,
sysvar::recent_blockhashes::IterItem,
transaction::TransactionError,
transaction_context::TransactionContext,
};
use {
super::*,
crate::{bank::Bank, bank_client::BankClient},
bincode::serialize,
solana_program_runtime::invoke_context::{mock_process_instruction, InvokeContext},
std::{cell::RefCell, sync::Arc},
};
impl From<Pubkey> for Address {
fn from(address: Pubkey) -> Self {
Self {
address,
base: None,
}
}
}
fn process_instruction(
instruction_data: &[u8],
transaction_accounts: Vec<(Pubkey, AccountSharedData)>,
instruction_accounts: Vec<AccountMeta>,
expected_result: Result<(), InstructionError>,
) -> Vec<AccountSharedData> {
mock_process_instruction(
&system_program::id(),
Vec::new(),
instruction_data,
transaction_accounts,
instruction_accounts,
expected_result,
super::process_instruction,
)
}
fn create_default_account() -> AccountSharedData {
AccountSharedData::new(0, 0, &Pubkey::new_unique())
}
fn create_default_recent_blockhashes_account() -> AccountSharedData {
#[allow(deprecated)]
recent_blockhashes_account::create_account_with_data_for_test(
vec![IterItem(0u64, &Hash::default(), 0); sysvar::recent_blockhashes::MAX_ENTRIES]
.into_iter(),
)
}
fn create_default_rent_account() -> AccountSharedData {
account::create_account_shared_data_for_test(&Rent::free())
}
#[test]
fn test_create_account() {
let new_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_unique();
let to = Pubkey::new_unique();
let from_account = AccountSharedData::new(100, 0, &system_program::id());
let to_account = AccountSharedData::new(0, 0, &Pubkey::default());
let accounts = process_instruction(
&bincode::serialize(&SystemInstruction::CreateAccount {
lamports: 50,
space: 2,
owner: new_owner,
})
.unwrap(),
vec![(from, from_account), (to, to_account)],
vec![
AccountMeta {
pubkey: from,
is_signer: true,
is_writable: false,
},
AccountMeta {
pubkey: to,
is_signer: true,
is_writable: false,
},
],
Ok(()),
);
assert_eq!(accounts[0].lamports(), 50);
assert_eq!(accounts[1].lamports(), 50);
assert_eq!(accounts[1].owner(), &new_owner);
assert_eq!(accounts[1].data(), &[0, 0]);
}
#[test]
fn test_create_account_with_seed() {
let new_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_unique();
let seed = "shiny pepper";
let to = Pubkey::create_with_seed(&from, seed, &new_owner).unwrap();
let from_account = AccountSharedData::new(100, 0, &system_program::id());
let to_account = AccountSharedData::new(0, 0, &Pubkey::default());
let accounts = process_instruction(
&bincode::serialize(&SystemInstruction::CreateAccountWithSeed {
base: from,
seed: seed.to_string(),
lamports: 50,
space: 2,
owner: new_owner,
})
.unwrap(),
vec![(from, from_account), (to, to_account)],
vec![
AccountMeta {
pubkey: from,
is_signer: true,
is_writable: false,
},
AccountMeta {
pubkey: to,
is_signer: true,
is_writable: false,
},
],
Ok(()),
);
assert_eq!(accounts[0].lamports(), 50);
assert_eq!(accounts[1].lamports(), 50);
assert_eq!(accounts[1].owner(), &new_owner);
assert_eq!(accounts[1].data(), &[0, 0]);
}
#[test]
fn test_create_account_with_seed_separate_base_account() {
let new_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_unique();
let base = Pubkey::new_unique();
let seed = "shiny pepper";
let to = Pubkey::create_with_seed(&base, seed, &new_owner).unwrap();
let from_account = AccountSharedData::new(100, 0, &system_program::id());
let to_account = AccountSharedData::new(0, 0, &Pubkey::default());
let base_account = AccountSharedData::new(0, 0, &Pubkey::default());
let accounts = process_instruction(
&bincode::serialize(&SystemInstruction::CreateAccountWithSeed {
base,
seed: seed.to_string(),
lamports: 50,
space: 2,
owner: new_owner,
})
.unwrap(),
vec![(from, from_account), (to, to_account), (base, base_account)],
vec![
AccountMeta {
pubkey: from,
is_signer: true,
is_writable: false,
},
AccountMeta {
pubkey: to,
is_signer: false,
is_writable: false,
},
AccountMeta {
pubkey: base,
is_signer: true,
is_writable: false,
},
],
Ok(()),
);
assert_eq!(accounts[0].lamports(), 50);
assert_eq!(accounts[1].lamports(), 50);
assert_eq!(accounts[1].owner(), &new_owner);
assert_eq!(accounts[1].data(), &[0, 0]);
}
#[test]
fn test_address_create_with_seed_mismatch() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
let from = Pubkey::new_unique();
let seed = "dull boy";
let to = Pubkey::new_unique();
let owner = Pubkey::new_unique();
assert_eq!(
Address::create(&to, Some((&from, seed, &owner)), &invoke_context),
Err(SystemError::AddressWithSeedMismatch.into())
);
}
#[test]
fn test_create_account_with_seed_missing_sig() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
let new_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_unique();
let seed = "dull boy";
let to = Pubkey::create_with_seed(&from, seed, &new_owner).unwrap();
let from_account = RefCell::new(AccountSharedData::new(100, 0, &system_program::id()));
let to_account = RefCell::new(AccountSharedData::new(0, 0, &Pubkey::default()));
let to_address =
Address::create(&to, Some((&from, seed, &new_owner)), &invoke_context).unwrap();
assert_eq!(
create_account(
&KeyedAccount::new(&from, false, &from_account),
&KeyedAccount::new(&to, false, &to_account),
&to_address,
50,
2,
&new_owner,
&HashSet::new(),
&invoke_context,
),
Err(InstructionError::MissingRequiredSignature)
);
assert_eq!(from_account.borrow().lamports(), 100);
assert_eq!(*to_account.borrow(), AccountSharedData::default());
}
#[test]
fn test_create_with_zero_lamports() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
// create account with zero lamports transferred
let new_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_unique();
let from_account = RefCell::new(AccountSharedData::new(100, 0, &Pubkey::new_unique())); // not from system account
let to = Pubkey::new_unique();
let to_account = RefCell::new(AccountSharedData::new(0, 0, &Pubkey::default()));
assert_eq!(
create_account(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&to, true, &to_account),
&to.into(),
0,
2,
&new_owner,
&[from, to].iter().cloned().collect::<HashSet<_>>(),
&invoke_context,
),
Ok(())
);
let from_lamports = from_account.borrow().lamports();
let to_lamports = to_account.borrow().lamports();
let to_owner = *to_account.borrow().owner();
assert_eq!(from_lamports, 100);
assert_eq!(to_lamports, 0);
assert_eq!(to_owner, new_owner);
assert_eq!(to_account.borrow().data(), &[0, 0]);
}
#[test]
fn test_create_negative_lamports() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
// Attempt to create account with more lamports than remaining in from_account
let new_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_unique();
let from_account = RefCell::new(AccountSharedData::new(100, 0, &system_program::id()));
let to = Pubkey::new_unique();
let to_account = RefCell::new(AccountSharedData::new(0, 0, &Pubkey::default()));
let result = create_account(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&from, false, &to_account),
&to.into(),
150,
2,
&new_owner,
&[from, to].iter().cloned().collect::<HashSet<_>>(),
&invoke_context,
);
assert_eq!(result, Err(SystemError::ResultWithNegativeLamports.into()));
}
#[test]
fn test_request_more_than_allowed_data_length() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
let from_account = RefCell::new(AccountSharedData::new(100, 0, &system_program::id()));
let from = Pubkey::new_unique();
let to_account = RefCell::new(AccountSharedData::new(0, 0, &system_program::id()));
let to = Pubkey::new_unique();
let signers = &[from, to].iter().cloned().collect::<HashSet<_>>();
let address = &to.into();
// Trying to request more data length than permitted will result in failure
let result = create_account(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&to, false, &to_account),
address,
50,
MAX_PERMITTED_DATA_LENGTH + 1,
&system_program::id(),
signers,
&invoke_context,
);
assert!(result.is_err());
assert_eq!(
result.err().unwrap(),
SystemError::InvalidAccountDataLength.into()
);
// Trying to request equal or less data length than permitted will be successful
let result = create_account(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&to, false, &to_account),
address,
50,
MAX_PERMITTED_DATA_LENGTH,
&system_program::id(),
signers,
&invoke_context,
);
assert!(result.is_ok());
assert_eq!(to_account.borrow().lamports(), 50);
assert_eq!(
to_account.borrow().data().len() as u64,
MAX_PERMITTED_DATA_LENGTH
);
}
#[test]
fn test_create_already_in_use() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
// Attempt to create system account in account already owned by another program
let new_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_unique();
let from_account = RefCell::new(AccountSharedData::new(100, 0, &system_program::id()));
let original_program_owner = Pubkey::new(&[5; 32]);
let owned_key = Pubkey::new_unique();
let owned_account = RefCell::new(AccountSharedData::new(0, 0, &original_program_owner));
let unchanged_account = owned_account.clone();
let signers = &[from, owned_key].iter().cloned().collect::<HashSet<_>>();
let owned_address = owned_key.into();
let result = create_account(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&owned_key, false, &owned_account),
&owned_address,
50,
2,
&new_owner,
signers,
&invoke_context,
);
assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into()));
let from_lamports = from_account.borrow().lamports();
assert_eq!(from_lamports, 100);
assert_eq!(owned_account, unchanged_account);
// Attempt to create system account in account that already has data
let owned_account = RefCell::new(AccountSharedData::new(0, 1, &Pubkey::default()));
let unchanged_account = owned_account.borrow().clone();
let result = create_account(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&owned_key, false, &owned_account),
&owned_address,
50,
2,
&new_owner,
signers,
&invoke_context,
);
assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into()));
let from_lamports = from_account.borrow().lamports();
assert_eq!(from_lamports, 100);
assert_eq!(*owned_account.borrow(), unchanged_account);
// Attempt to create an account that already has lamports
let owned_account = RefCell::new(AccountSharedData::new(1, 0, &Pubkey::default()));
let unchanged_account = owned_account.borrow().clone();
let result = create_account(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&owned_key, false, &owned_account),
&owned_address,
50,
2,
&new_owner,
signers,
&invoke_context,
);
assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into()));
assert_eq!(from_lamports, 100);
assert_eq!(*owned_account.borrow(), unchanged_account);
}
#[test]
fn test_create_unsigned() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
// Attempt to create an account without signing the transfer
let new_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_unique();
let from_account = RefCell::new(AccountSharedData::new(100, 0, &system_program::id()));
let owned_key = Pubkey::new_unique();
let owned_account = RefCell::new(AccountSharedData::new(0, 0, &Pubkey::default()));
let owned_address = owned_key.into();
// Haven't signed from account
let result = create_account(
&KeyedAccount::new(&from, false, &from_account),
&KeyedAccount::new(&owned_key, false, &owned_account),
&owned_address,
50,
2,
&new_owner,
&[owned_key].iter().cloned().collect::<HashSet<_>>(),
&invoke_context,
);
assert_eq!(result, Err(InstructionError::MissingRequiredSignature));
// Haven't signed to account
let owned_account = RefCell::new(AccountSharedData::new(0, 0, &Pubkey::default()));
let result = create_account(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&owned_key, true, &owned_account),
&owned_address,
50,
2,
&new_owner,
&[from].iter().cloned().collect::<HashSet<_>>(),
&invoke_context,
);
assert_eq!(result, Err(InstructionError::MissingRequiredSignature));
// Don't support unsigned creation with zero lamports (ephemeral account)
let owned_account = RefCell::new(AccountSharedData::new(0, 0, &Pubkey::default()));
let result = create_account(
&KeyedAccount::new(&from, false, &from_account),
&KeyedAccount::new(&owned_key, true, &owned_account),
&owned_address,
0,
2,
&new_owner,
&[owned_key].iter().cloned().collect::<HashSet<_>>(),
&invoke_context,
);
assert_eq!(result, Err(InstructionError::MissingRequiredSignature));
}
#[test]
fn test_create_sysvar_invalid_id_with_feature() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
// Attempt to create system account in account already owned by another program
let from = Pubkey::new_unique();
let from_account = RefCell::new(AccountSharedData::new(100, 0, &system_program::id()));
let to = Pubkey::new_unique();
let to_account = RefCell::new(AccountSharedData::new(0, 0, &system_program::id()));
let signers = [from, to].iter().cloned().collect::<HashSet<_>>();
let to_address = to.into();
        // with the rent_for_sysvars feature active, creating a
        // sysvar::id()-owned account succeeds
let result = create_account(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&to, false, &to_account),
&to_address,
50,
2,
&sysvar::id(),
&signers,
&invoke_context,
);
assert_eq!(result, Ok(()));
}
#[test]
fn test_create_sysvar_invalid_id_without_feature() {
let mut feature_set = FeatureSet::all_enabled();
feature_set
.active
.remove(&feature_set::rent_for_sysvars::id());
feature_set
.inactive
.insert(feature_set::rent_for_sysvars::id());
let transaction_context = TransactionContext::new(Vec::new(), 1);
let mut invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
invoke_context.feature_set = Arc::new(feature_set);
// Attempt to create system account in account already owned by another program
let from = Pubkey::new_unique();
let from_account = RefCell::new(AccountSharedData::new(100, 0, &system_program::id()));
let to = Pubkey::new_unique();
let to_account = RefCell::new(AccountSharedData::new(0, 0, &system_program::id()));
let signers = [from, to].iter().cloned().collect::<HashSet<_>>();
let to_address = to.into();
let result = create_account(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&to, false, &to_account),
&to_address,
50,
2,
&sysvar::id(),
&signers,
&invoke_context,
);
assert_eq!(result, Err(SystemError::InvalidProgramId.into()));
}
#[test]
fn test_create_data_populated() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
// Attempt to create system account in account with populated data
let new_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_unique();
let from_account = RefCell::new(AccountSharedData::new(100, 0, &system_program::id()));
let populated_key = Pubkey::new_unique();
let populated_account = RefCell::new(AccountSharedData::from(Account {
data: vec![0, 1, 2, 3],
..Account::default()
}));
let signers = [from, populated_key]
.iter()
.cloned()
.collect::<HashSet<_>>();
let populated_address = populated_key.into();
let result = create_account(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&populated_key, false, &populated_account),
&populated_address,
50,
2,
&new_owner,
&signers,
&invoke_context,
);
assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into()));
}
#[test]
fn test_create_from_account_is_nonce_fail() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
let nonce = Pubkey::new_unique();
let nonce_account = RefCell::new(
AccountSharedData::new_data(
42,
&nonce::state::Versions::new_current(nonce::State::Initialized(
nonce::state::Data::default(),
)),
&system_program::id(),
)
.unwrap(),
);
let from = KeyedAccount::new(&nonce, true, &nonce_account);
let new = Pubkey::new_unique();
let new_account = RefCell::new(AccountSharedData::new(0, 0, &system_program::id()));
let signers = [nonce, new].iter().cloned().collect::<HashSet<_>>();
let new_address = new.into();
let new_keyed_account = KeyedAccount::new(&new, false, &new_account);
assert_eq!(
create_account(
&from,
&new_keyed_account,
&new_address,
42,
0,
&Pubkey::new_unique(),
&signers,
&invoke_context,
),
Err(InstructionError::InvalidArgument),
);
}
#[test]
fn test_assign() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
let new_owner = Pubkey::new(&[9; 32]);
let pubkey = Pubkey::new_unique();
let mut account = AccountSharedData::new(100, 0, &system_program::id());
assert_eq!(
assign(
&mut account,
&pubkey.into(),
&new_owner,
&HashSet::new(),
&invoke_context,
),
Err(InstructionError::MissingRequiredSignature)
);
// no change, no signature needed
assert_eq!(
assign(
&mut account,
&pubkey.into(),
&system_program::id(),
&HashSet::new(),
&invoke_context,
),
Ok(())
);
process_instruction(
&bincode::serialize(&SystemInstruction::Assign { owner: new_owner }).unwrap(),
vec![(pubkey, account)],
vec![AccountMeta {
pubkey,
is_signer: true,
is_writable: false,
}],
Ok(()),
);
}
#[test]
fn test_assign_to_sysvar_with_feature() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
let new_owner = sysvar::id();
let from = Pubkey::new_unique();
let mut from_account = AccountSharedData::new(100, 0, &system_program::id());
assert_eq!(
assign(
&mut from_account,
&from.into(),
&new_owner,
&[from].iter().cloned().collect::<HashSet<_>>(),
&invoke_context,
),
Ok(())
);
}
#[test]
fn test_assign_to_sysvar_without_feature() {
let mut feature_set = FeatureSet::all_enabled();
feature_set
.active
.remove(&feature_set::rent_for_sysvars::id());
feature_set
.inactive
.insert(feature_set::rent_for_sysvars::id());
let transaction_context = TransactionContext::new(Vec::new(), 1);
let mut invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
invoke_context.feature_set = Arc::new(feature_set);
let new_owner = sysvar::id();
let from = Pubkey::new_unique();
let mut from_account = AccountSharedData::new(100, 0, &system_program::id());
assert_eq!(
assign(
&mut from_account,
&from.into(),
&new_owner,
&[from].iter().cloned().collect::<HashSet<_>>(),
&invoke_context,
),
Err(SystemError::InvalidProgramId.into())
);
}
#[test]
fn test_process_bogus_instruction() {
// Attempt to assign with no accounts
let instruction = SystemInstruction::Assign {
owner: Pubkey::new_unique(),
};
let data = serialize(&instruction).unwrap();
process_instruction(
&data,
Vec::new(),
Vec::new(),
Err(InstructionError::NotEnoughAccountKeys),
);
// Attempt to transfer with no destination
let from = Pubkey::new_unique();
let from_account = AccountSharedData::new(100, 0, &system_program::id());
let instruction = SystemInstruction::Transfer { lamports: 0 };
let data = serialize(&instruction).unwrap();
process_instruction(
&data,
vec![(from, from_account)],
vec![AccountMeta {
pubkey: from,
is_signer: true,
is_writable: false,
}],
Err(InstructionError::NotEnoughAccountKeys),
);
}
#[test]
fn test_transfer_lamports() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
let from = Pubkey::new_unique();
let from_account = RefCell::new(AccountSharedData::new(100, 0, &Pubkey::new(&[2; 32]))); // account owner should not matter
let to = Pubkey::new(&[3; 32]);
let to_account = RefCell::new(AccountSharedData::new(1, 0, &to)); // account owner should not matter
let from_keyed_account = KeyedAccount::new(&from, true, &from_account);
let to_keyed_account = KeyedAccount::new(&to, false, &to_account);
transfer(&from_keyed_account, &to_keyed_account, 50, &invoke_context).unwrap();
let from_lamports = from_keyed_account.account.borrow().lamports();
let to_lamports = to_keyed_account.account.borrow().lamports();
assert_eq!(from_lamports, 50);
assert_eq!(to_lamports, 51);
// Attempt to move more lamports than remaining in from_account
let from_keyed_account = KeyedAccount::new(&from, true, &from_account);
let result = transfer(&from_keyed_account, &to_keyed_account, 100, &invoke_context);
assert_eq!(result, Err(SystemError::ResultWithNegativeLamports.into()));
assert_eq!(from_keyed_account.account.borrow().lamports(), 50);
assert_eq!(to_keyed_account.account.borrow().lamports(), 51);
// test signed transfer of zero
assert!(transfer(&from_keyed_account, &to_keyed_account, 0, &invoke_context).is_ok());
assert_eq!(from_keyed_account.account.borrow().lamports(), 50);
assert_eq!(to_keyed_account.account.borrow().lamports(), 51);
// test unsigned transfer of zero
let from_keyed_account = KeyedAccount::new(&from, false, &from_account);
assert_eq!(
transfer(&from_keyed_account, &to_keyed_account, 0, &invoke_context),
Err(InstructionError::MissingRequiredSignature)
);
assert_eq!(from_keyed_account.account.borrow().lamports(), 50);
assert_eq!(to_keyed_account.account.borrow().lamports(), 51);
}
#[test]
fn test_transfer_with_seed() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
let base = Pubkey::new_unique();
let base_account = RefCell::new(AccountSharedData::new(100, 0, &Pubkey::new(&[2; 32]))); // account owner should not matter
let from_base_keyed_account = KeyedAccount::new(&base, true, &base_account);
let from_seed = "42";
let from_owner = system_program::id();
let from = Pubkey::create_with_seed(&base, from_seed, &from_owner).unwrap();
let from_account = RefCell::new(AccountSharedData::new(100, 0, &Pubkey::new(&[2; 32]))); // account owner should not matter
let to = Pubkey::new(&[3; 32]);
let to_account = RefCell::new(AccountSharedData::new(1, 0, &to)); // account owner should not matter
let from_keyed_account = KeyedAccount::new(&from, true, &from_account);
let to_keyed_account = KeyedAccount::new(&to, false, &to_account);
transfer_with_seed(
&from_keyed_account,
&from_base_keyed_account,
from_seed,
&from_owner,
&to_keyed_account,
50,
&invoke_context,
)
.unwrap();
let from_lamports = from_keyed_account.account.borrow().lamports();
let to_lamports = to_keyed_account.account.borrow().lamports();
assert_eq!(from_lamports, 50);
assert_eq!(to_lamports, 51);
// Attempt to move more lamports than remaining in from_account
let from_keyed_account = KeyedAccount::new(&from, true, &from_account);
let result = transfer_with_seed(
&from_keyed_account,
&from_base_keyed_account,
from_seed,
&from_owner,
&to_keyed_account,
100,
&invoke_context,
);
assert_eq!(result, Err(SystemError::ResultWithNegativeLamports.into()));
assert_eq!(from_keyed_account.account.borrow().lamports(), 50);
assert_eq!(to_keyed_account.account.borrow().lamports(), 51);
// test unsigned transfer of zero
let from_keyed_account = KeyedAccount::new(&from, false, &from_account);
assert!(transfer_with_seed(
&from_keyed_account,
&from_base_keyed_account,
from_seed,
&from_owner,
&to_keyed_account,
0,
&invoke_context,
)
.is_ok());
assert_eq!(from_keyed_account.account.borrow().lamports(), 50);
assert_eq!(to_keyed_account.account.borrow().lamports(), 51);
}
#[test]
fn test_transfer_lamports_from_nonce_account_fail() {
let transaction_context = TransactionContext::new(Vec::new(), 1);
let invoke_context = InvokeContext::new_mock(&transaction_context, &[]);
let from = Pubkey::new_unique();
let from_account = RefCell::new(
AccountSharedData::new_data(
100,
&nonce::state::Versions::new_current(nonce::State::Initialized(
nonce::state::Data {
authority: from,
..nonce::state::Data::default()
},
)),
&system_program::id(),
)
.unwrap(),
);
assert_eq!(
get_system_account_kind(&from_account.borrow()),
Some(SystemAccountKind::Nonce)
);
let to = Pubkey::new(&[3; 32]);
let to_account = RefCell::new(AccountSharedData::new(1, 0, &to)); // account owner should not matter
assert_eq!(
transfer(
&KeyedAccount::new(&from, true, &from_account),
&KeyedAccount::new(&to, false, &to_account),
50,
&invoke_context,
),
Err(InstructionError::InvalidArgument),
)
}
#[test]
fn test_allocate() {
let (genesis_config, mint_keypair) = create_genesis_config(100);
let bank = Bank::new_for_tests(&genesis_config);
let bank_client = BankClient::new(bank);
let alice_keypair = Keypair::new();
let alice_pubkey = alice_keypair.pubkey();
let seed = "seed";
let owner = Pubkey::new_unique();
let alice_with_seed = Pubkey::create_with_seed(&alice_pubkey, seed, &owner).unwrap();
bank_client
.transfer_and_confirm(50, &mint_keypair, &alice_pubkey)
.unwrap();
let allocate_with_seed = Message::new(
&[system_instruction::allocate_with_seed(
&alice_with_seed,
&alice_pubkey,
seed,
2,
&owner,
)],
Some(&alice_pubkey),
);
assert!(bank_client
.send_and_confirm_message(&[&alice_keypair], allocate_with_seed)
.is_ok());
let allocate = system_instruction::allocate(&alice_pubkey, 2);
assert!(bank_client
.send_and_confirm_instruction(&alice_keypair, allocate)
.is_ok());
}
fn with_create_zero_lamport<F>(callback: F)
where
F: Fn(&Bank),
{
solana_logger::setup();
let alice_keypair = Keypair::new();
let bob_keypair = Keypair::new();
let alice_pubkey = alice_keypair.pubkey();
let bob_pubkey = bob_keypair.pubkey();
let program = Pubkey::new_unique();
let collector = Pubkey::new_unique();
let mint_lamports = 10000;
let len1 = 123;
let len2 = 456;
// create initial bank and fund the alice account
let (genesis_config, mint_keypair) = create_genesis_config(mint_lamports);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let bank_client = BankClient::new_shared(&bank);
bank_client
.transfer_and_confirm(mint_lamports, &mint_keypair, &alice_pubkey)
.unwrap();
// create zero-lamports account to be cleaned
let bank = Arc::new(Bank::new_from_parent(&bank, &collector, bank.slot() + 1));
let bank_client = BankClient::new_shared(&bank);
let ix = system_instruction::create_account(&alice_pubkey, &bob_pubkey, 0, len1, &program);
let message = Message::new(&[ix], Some(&alice_keypair.pubkey()));
let r = bank_client.send_and_confirm_message(&[&alice_keypair, &bob_keypair], message);
assert!(r.is_ok());
// transfer some to bogus pubkey just to make previous bank (=slot) really cleanable
let bank = Arc::new(Bank::new_from_parent(&bank, &collector, bank.slot() + 1));
let bank_client = BankClient::new_shared(&bank);
bank_client
.transfer_and_confirm(50, &alice_keypair, &Pubkey::new_unique())
.unwrap();
// super fun time; callback chooses to .clean_accounts(None) or not
callback(&*bank);
// create a normal account at the same pubkey as the zero-lamports account
let bank = Arc::new(Bank::new_from_parent(&bank, &collector, bank.slot() + 1));
let bank_client = BankClient::new_shared(&bank);
let ix = system_instruction::create_account(&alice_pubkey, &bob_pubkey, 1, len2, &program);
let message = Message::new(&[ix], Some(&alice_pubkey));
let r = bank_client.send_and_confirm_message(&[&alice_keypair, &bob_keypair], message);
assert!(r.is_ok());
}
#[test]
fn test_create_zero_lamport_with_clean() {
with_create_zero_lamport(|bank| {
bank.freeze();
bank.squash();
bank.force_flush_accounts_cache();
// do clean and assert that it actually did its job
assert_eq!(3, bank.get_snapshot_storages(None).len());
bank.clean_accounts(false, false, None);
assert_eq!(2, bank.get_snapshot_storages(None).len());
});
}
#[test]
fn test_create_zero_lamport_without_clean() {
with_create_zero_lamport(|_| {
            // just do nothing; this should behave identically to test_create_zero_lamport_with_clean
});
}
#[test]
fn test_assign_with_seed() {
let (genesis_config, mint_keypair) = create_genesis_config(100);
let bank = Bank::new_for_tests(&genesis_config);
let bank_client = BankClient::new(bank);
let alice_keypair = Keypair::new();
let alice_pubkey = alice_keypair.pubkey();
let seed = "seed";
let owner = Pubkey::new_unique();
let alice_with_seed = Pubkey::create_with_seed(&alice_pubkey, seed, &owner).unwrap();
bank_client
.transfer_and_confirm(50, &mint_keypair, &alice_pubkey)
.unwrap();
let assign_with_seed = Message::new(
&[system_instruction::assign_with_seed(
&alice_with_seed,
&alice_pubkey,
seed,
&owner,
)],
Some(&alice_pubkey),
);
assert!(bank_client
.send_and_confirm_message(&[&alice_keypair], assign_with_seed)
.is_ok());
}
#[test]
fn test_system_unsigned_transaction() {
let (genesis_config, alice_keypair) = create_genesis_config(100);
let alice_pubkey = alice_keypair.pubkey();
let mallory_keypair = Keypair::new();
let mallory_pubkey = mallory_keypair.pubkey();
        // Fund the `to` account to bypass the AccountNotFound error
let bank = Bank::new_for_tests(&genesis_config);
let bank_client = BankClient::new(bank);
bank_client
.transfer_and_confirm(50, &alice_keypair, &mallory_pubkey)
.unwrap();
// Erroneously sign transaction with recipient account key
// No signature case is tested by bank `test_zero_signatures()`
let account_metas = vec![
AccountMeta::new(alice_pubkey, false),
AccountMeta::new(mallory_pubkey, true),
];
let malicious_instruction = Instruction::new_with_bincode(
system_program::id(),
&SystemInstruction::Transfer { lamports: 10 },
account_metas,
);
assert_eq!(
bank_client
.send_and_confirm_instruction(&mallory_keypair, malicious_instruction)
.unwrap_err()
.unwrap(),
TransactionError::InstructionError(0, InstructionError::MissingRequiredSignature)
);
assert_eq!(bank_client.get_balance(&alice_pubkey).unwrap(), 50);
assert_eq!(bank_client.get_balance(&mallory_pubkey).unwrap(), 50);
}
fn process_nonce_instruction(
instruction: Instruction,
expected_result: Result<(), InstructionError>,
) -> Vec<AccountSharedData> {
let transaction_accounts = instruction
.accounts
.iter()
.map(|meta| {
#[allow(deprecated)]
(
meta.pubkey,
if sysvar::recent_blockhashes::check_id(&meta.pubkey) {
create_default_recent_blockhashes_account()
} else if sysvar::rent::check_id(&meta.pubkey) {
account::create_account_shared_data_for_test(&Rent::free())
} else {
AccountSharedData::new(0, 0, &Pubkey::new_unique())
},
)
})
.collect();
process_instruction(
&instruction.data,
transaction_accounts,
instruction.accounts,
expected_result,
)
}
#[test]
fn test_process_nonce_ix_no_acc_data_fail() {
        let nonce_address = Pubkey::new_unique();
        process_nonce_instruction(
            system_instruction::advance_nonce_account(&nonce_address, &nonce_address),
Err(InstructionError::InvalidAccountData),
);
}
#[test]
fn test_process_nonce_ix_no_keyed_accs_fail() {
process_instruction(
&serialize(&SystemInstruction::AdvanceNonceAccount).unwrap(),
Vec::new(),
Vec::new(),
Err(InstructionError::NotEnoughAccountKeys),
);
}
#[test]
fn test_process_nonce_ix_only_nonce_acc_fail() {
let pubkey = Pubkey::new_unique();
process_instruction(
&serialize(&SystemInstruction::AdvanceNonceAccount).unwrap(),
vec![(pubkey, create_default_account())],
vec![AccountMeta {
pubkey,
is_signer: true,
is_writable: true,
}],
Err(InstructionError::NotEnoughAccountKeys),
);
}
#[test]
fn test_process_nonce_ix_bad_recent_blockhash_state_fail() {
let pubkey = Pubkey::new_unique();
#[allow(deprecated)]
let blockhash_id = sysvar::recent_blockhashes::id();
process_instruction(
&serialize(&SystemInstruction::AdvanceNonceAccount).unwrap(),
vec![
(pubkey, create_default_account()),
(blockhash_id, create_default_account()),
],
vec![
AccountMeta {
pubkey,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
],
Err(InstructionError::InvalidArgument),
);
}
#[test]
fn test_process_nonce_ix_ok() {
let nonce_address = Pubkey::new_unique();
let nonce_account = nonce_account::create_account(1_000_000).into_inner();
#[allow(deprecated)]
let blockhash_id = sysvar::recent_blockhashes::id();
let accounts = process_instruction(
&serialize(&SystemInstruction::InitializeNonceAccount(nonce_address)).unwrap(),
vec![
(nonce_address, nonce_account),
(blockhash_id, create_default_recent_blockhashes_account()),
(sysvar::rent::id(), create_default_rent_account()),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
AccountMeta {
pubkey: sysvar::rent::id(),
is_signer: false,
is_writable: false,
},
],
Ok(()),
);
let blockhash = hash(&serialize(&0).unwrap());
#[allow(deprecated)]
let new_recent_blockhashes_account =
solana_sdk::recent_blockhashes_account::create_account_with_data_for_test(
vec![IterItem(0u64, &blockhash, 0); sysvar::recent_blockhashes::MAX_ENTRIES]
.into_iter(),
);
mock_process_instruction(
&system_program::id(),
Vec::new(),
&serialize(&SystemInstruction::AdvanceNonceAccount).unwrap(),
vec![
(nonce_address, accounts[0].clone()),
(blockhash_id, new_recent_blockhashes_account),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
],
Ok(()),
|first_instruction_account: usize,
instruction_data: &[u8],
invoke_context: &mut InvokeContext| {
invoke_context.blockhash = hash(&serialize(&0).unwrap());
super::process_instruction(
first_instruction_account,
instruction_data,
invoke_context,
)
},
);
}
#[test]
fn test_process_withdraw_ix_no_acc_data_fail() {
let nonce_address = Pubkey::new_unique();
process_nonce_instruction(
system_instruction::withdraw_nonce_account(
&nonce_address,
&Pubkey::new_unique(),
&nonce_address,
1,
),
Err(InstructionError::InvalidAccountData),
);
}
#[test]
fn test_process_withdraw_ix_no_keyed_accs_fail() {
process_instruction(
&serialize(&SystemInstruction::WithdrawNonceAccount(42)).unwrap(),
Vec::new(),
Vec::new(),
Err(InstructionError::NotEnoughAccountKeys),
);
}
#[test]
fn test_process_withdraw_ix_only_nonce_acc_fail() {
let nonce_address = Pubkey::new_unique();
process_instruction(
&serialize(&SystemInstruction::WithdrawNonceAccount(42)).unwrap(),
vec![(nonce_address, create_default_account())],
vec![AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
}],
Err(InstructionError::NotEnoughAccountKeys),
);
}
#[test]
fn test_process_withdraw_ix_bad_recent_blockhash_state_fail() {
let nonce_address = Pubkey::new_unique();
let pubkey = Pubkey::new_unique();
#[allow(deprecated)]
let blockhash_id = sysvar::recent_blockhashes::id();
process_instruction(
&serialize(&SystemInstruction::WithdrawNonceAccount(42)).unwrap(),
vec![
(nonce_address, create_default_account()),
(pubkey, create_default_account()),
(blockhash_id, create_default_account()),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey,
is_signer: false,
is_writable: false,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
],
Err(InstructionError::InvalidArgument),
);
}
#[test]
fn test_process_withdraw_ix_bad_rent_state_fail() {
let nonce_address = Pubkey::new_unique();
let nonce_account = nonce_account::create_account(1_000_000).into_inner();
let pubkey = Pubkey::new_unique();
#[allow(deprecated)]
let blockhash_id = sysvar::recent_blockhashes::id();
process_instruction(
&serialize(&SystemInstruction::WithdrawNonceAccount(42)).unwrap(),
vec![
(nonce_address, nonce_account),
(pubkey, create_default_account()),
(blockhash_id, create_default_recent_blockhashes_account()),
(sysvar::rent::id(), create_default_account()),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey,
is_signer: true,
is_writable: false,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
AccountMeta {
pubkey: sysvar::rent::id(),
is_signer: false,
is_writable: false,
},
],
Err(InstructionError::InvalidArgument),
);
}
#[test]
fn test_process_withdraw_ix_ok() {
let nonce_address = Pubkey::new_unique();
let nonce_account = nonce_account::create_account(1_000_000).into_inner();
let pubkey = Pubkey::new_unique();
#[allow(deprecated)]
let blockhash_id = sysvar::recent_blockhashes::id();
process_instruction(
&serialize(&SystemInstruction::WithdrawNonceAccount(42)).unwrap(),
vec![
(nonce_address, nonce_account),
(pubkey, create_default_account()),
(blockhash_id, create_default_recent_blockhashes_account()),
(sysvar::rent::id(), create_default_rent_account()),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey,
is_signer: true,
is_writable: false,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
AccountMeta {
pubkey: sysvar::rent::id(),
is_signer: false,
is_writable: false,
},
],
Ok(()),
);
}
#[test]
fn test_process_initialize_ix_no_keyed_accs_fail() {
process_instruction(
&serialize(&SystemInstruction::InitializeNonceAccount(Pubkey::default())).unwrap(),
Vec::new(),
Vec::new(),
Err(InstructionError::NotEnoughAccountKeys),
);
}
#[test]
fn test_process_initialize_ix_only_nonce_acc_fail() {
let nonce_address = Pubkey::new_unique();
let nonce_account = nonce_account::create_account(1_000_000).into_inner();
process_instruction(
&serialize(&SystemInstruction::InitializeNonceAccount(nonce_address)).unwrap(),
vec![(nonce_address, nonce_account)],
vec![AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
}],
Err(InstructionError::NotEnoughAccountKeys),
);
}
#[test]
fn test_process_initialize_bad_recent_blockhash_state_fail() {
let nonce_address = Pubkey::new_unique();
let nonce_account = nonce_account::create_account(1_000_000).into_inner();
#[allow(deprecated)]
let blockhash_id = sysvar::recent_blockhashes::id();
process_instruction(
&serialize(&SystemInstruction::InitializeNonceAccount(nonce_address)).unwrap(),
vec![
(nonce_address, nonce_account),
(blockhash_id, create_default_account()),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
],
Err(InstructionError::InvalidArgument),
);
}
#[test]
fn test_process_initialize_ix_bad_rent_state_fail() {
let nonce_address = Pubkey::new_unique();
let nonce_account = nonce_account::create_account(1_000_000).into_inner();
#[allow(deprecated)]
let blockhash_id = sysvar::recent_blockhashes::id();
process_instruction(
&serialize(&SystemInstruction::InitializeNonceAccount(nonce_address)).unwrap(),
vec![
(nonce_address, nonce_account),
(blockhash_id, create_default_recent_blockhashes_account()),
(sysvar::rent::id(), create_default_account()),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
AccountMeta {
pubkey: sysvar::rent::id(),
is_signer: false,
is_writable: false,
},
],
Err(InstructionError::InvalidArgument),
);
}
#[test]
fn test_process_initialize_ix_ok() {
let nonce_address = Pubkey::new_unique();
let nonce_account = nonce_account::create_account(1_000_000).into_inner();
#[allow(deprecated)]
let blockhash_id = sysvar::recent_blockhashes::id();
process_instruction(
&serialize(&SystemInstruction::InitializeNonceAccount(nonce_address)).unwrap(),
vec![
(nonce_address, nonce_account),
(blockhash_id, create_default_recent_blockhashes_account()),
(sysvar::rent::id(), create_default_rent_account()),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
AccountMeta {
pubkey: sysvar::rent::id(),
is_signer: false,
is_writable: false,
},
],
Ok(()),
);
}
#[test]
fn test_process_authorize_ix_ok() {
let nonce_address = Pubkey::new_unique();
let nonce_account = nonce_account::create_account(1_000_000).into_inner();
#[allow(deprecated)]
let blockhash_id = sysvar::recent_blockhashes::id();
let accounts = process_instruction(
&serialize(&SystemInstruction::InitializeNonceAccount(nonce_address)).unwrap(),
vec![
(nonce_address, nonce_account),
(blockhash_id, create_default_recent_blockhashes_account()),
(sysvar::rent::id(), create_default_rent_account()),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
AccountMeta {
pubkey: sysvar::rent::id(),
is_signer: false,
is_writable: false,
},
],
Ok(()),
);
process_instruction(
&serialize(&SystemInstruction::AuthorizeNonceAccount(nonce_address)).unwrap(),
vec![(nonce_address, accounts[0].clone())],
vec![AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
}],
Ok(()),
);
}
#[test]
fn test_process_authorize_bad_account_data_fail() {
let nonce_address = Pubkey::new_unique();
process_nonce_instruction(
system_instruction::authorize_nonce_account(
&nonce_address,
&Pubkey::new_unique(),
&nonce_address,
),
Err(InstructionError::InvalidAccountData),
);
}
#[test]
fn test_get_system_account_kind_system_ok() {
let system_account = AccountSharedData::default();
assert_eq!(
get_system_account_kind(&system_account),
Some(SystemAccountKind::System)
);
}
#[test]
fn test_get_system_account_kind_nonce_ok() {
let nonce_account = AccountSharedData::new_data(
42,
&nonce::state::Versions::new_current(nonce::State::Initialized(
nonce::state::Data::default(),
)),
&system_program::id(),
)
.unwrap();
assert_eq!(
get_system_account_kind(&nonce_account),
Some(SystemAccountKind::Nonce)
);
}
#[test]
fn test_get_system_account_kind_uninitialized_nonce_account_fail() {
assert_eq!(
get_system_account_kind(&nonce_account::create_account(42).borrow()),
None
);
}
#[test]
fn test_get_system_account_kind_system_owner_nonzero_nonnonce_data_fail() {
let other_data_account =
AccountSharedData::new_data(42, b"other", &Pubkey::default()).unwrap();
assert_eq!(get_system_account_kind(&other_data_account), None);
}
#[test]
fn test_get_system_account_kind_nonsystem_owner_with_nonce_data_fail() {
let nonce_account = AccountSharedData::new_data(
42,
&nonce::state::Versions::new_current(nonce::State::Initialized(
nonce::state::Data::default(),
)),
&Pubkey::new_unique(),
)
.unwrap();
assert_eq!(get_system_account_kind(&nonce_account), None);
}
#[test]
fn test_nonce_initialize_with_empty_recent_blockhashes_fail() {
let nonce_address = Pubkey::new_unique();
let nonce_account = nonce_account::create_account(1_000_000).into_inner();
#[allow(deprecated)]
let blockhash_id = sysvar::recent_blockhashes::id();
#[allow(deprecated)]
let new_recent_blockhashes_account =
solana_sdk::recent_blockhashes_account::create_account_with_data_for_test(
vec![].into_iter(),
);
process_instruction(
&serialize(&SystemInstruction::InitializeNonceAccount(nonce_address)).unwrap(),
vec![
(nonce_address, nonce_account),
(blockhash_id, new_recent_blockhashes_account),
(sysvar::rent::id(), create_default_rent_account()),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
AccountMeta {
pubkey: sysvar::rent::id(),
is_signer: false,
is_writable: false,
},
],
Err(NonceError::NoRecentBlockhashes.into()),
);
}
#[test]
fn test_nonce_advance_with_empty_recent_blockhashes_fail() {
let nonce_address = Pubkey::new_unique();
let nonce_account = nonce_account::create_account(1_000_000).into_inner();
#[allow(deprecated)]
let blockhash_id = sysvar::recent_blockhashes::id();
let accounts = process_instruction(
&serialize(&SystemInstruction::InitializeNonceAccount(nonce_address)).unwrap(),
vec![
(nonce_address, nonce_account),
(blockhash_id, create_default_recent_blockhashes_account()),
(sysvar::rent::id(), create_default_rent_account()),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
AccountMeta {
pubkey: sysvar::rent::id(),
is_signer: false,
is_writable: false,
},
],
Ok(()),
);
#[allow(deprecated)]
let new_recent_blockhashes_account =
solana_sdk::recent_blockhashes_account::create_account_with_data_for_test(
vec![].into_iter(),
);
mock_process_instruction(
&system_program::id(),
Vec::new(),
&serialize(&SystemInstruction::AdvanceNonceAccount).unwrap(),
vec![
(nonce_address, accounts[0].clone()),
(blockhash_id, new_recent_blockhashes_account),
],
vec![
AccountMeta {
pubkey: nonce_address,
is_signer: true,
is_writable: true,
},
AccountMeta {
pubkey: blockhash_id,
is_signer: false,
is_writable: false,
},
],
Err(NonceError::NoRecentBlockhashes.into()),
|first_instruction_account: usize,
instruction_data: &[u8],
invoke_context: &mut InvokeContext| {
invoke_context.blockhash = hash(&serialize(&0).unwrap());
super::process_instruction(
first_instruction_account,
instruction_data,
invoke_context,
)
},
);
}
}
| 36.273309 | 131 | 0.553247 |
29e7c2ef48d4fca06aedb9d1532ca1f2ecb9948a | 5,035 | use std::fs::File;
use std::io::Read;
use symbolic_unreal::{Unreal4Crash, Unreal4Error, Unreal4FileType};
fn get_unreal_crash() -> Result<Unreal4Crash, Unreal4Error> {
let mut file =
File::open("../testutils/fixtures/unreal/unreal_crash").expect("example file opens");
let mut file_content = Vec::new();
file.read_to_end(&mut file_content).expect("fixture file");
Unreal4Crash::parse(&file_content)
}
fn get_unreal_apple_crash() -> Result<Unreal4Crash, Unreal4Error> {
let mut file =
File::open("../testutils/fixtures/unreal/unreal_crash_apple").expect("example file opens");
let mut file_content = Vec::new();
file.read_to_end(&mut file_content).expect("fixture file");
Unreal4Crash::parse(&file_content)
}
#[test]
fn test_load_unreal_crash() {
get_unreal_crash().expect("crash file loaded");
}
#[test]
fn test_get_minidump_slice() {
let ue4_crash = get_unreal_crash().expect("test crash file loads");
let minidump_file = ue4_crash
.file_by_type(Unreal4FileType::Minidump)
.expect("expected minidump file exists");
assert_eq!(minidump_file.data().len(), 410_700);
let crash_file = ue4_crash
.native_crash()
.expect("expected native crash exists");
assert_eq!(minidump_file.index(), crash_file.index());
assert!(ue4_crash
.file_by_type(Unreal4FileType::AppleCrashReport)
.is_none());
}
#[test]
fn test_get_apple_crash_report() {
let ue4_crash = get_unreal_apple_crash().expect("test crash file loads");
let apple_file = ue4_crash
.file_by_type(Unreal4FileType::AppleCrashReport)
.expect("expected apple crash report file exists");
assert_eq!(apple_file.data().len(), 91_392);
let crash_file = ue4_crash
.native_crash()
.expect("expected native crash exists");
assert_eq!(apple_file.index(), crash_file.index());
assert!(ue4_crash.file_by_type(Unreal4FileType::Minidump).is_none());
}
#[test]
fn test_contexts_runtime_properties() {
let ue4_crash = get_unreal_crash().expect("test crash file loads");
let ue4_context = ue4_crash
.context()
.expect("no errors parsing the context file")
.expect("context file exists in sample crash");
let runtime_properties = ue4_context
.runtime_properties
.expect("runtime properties exist within sample crash");
assert_eq!(
"UE4CC-Windows-379993BB42BD8FBED67986857D8844B5_0000",
runtime_properties.crash_guid.expect("crash guid")
);
}
#[test]
fn test_contexts_platform_properties() {
let ue4_crash = get_unreal_crash().expect("test crash file loads");
let ue4_context = ue4_crash
.context()
.expect("no errors parsing the context file")
.expect("context file exists in sample crash");
let platform_properties = ue4_context
.platform_properties
.expect("platform properties exist within sample crash");
assert_eq!(
platform_properties
.is_windows
.expect("sample contains value as 1 for true"),
true
);
assert_eq!(
platform_properties
.callback_result
.expect("sample contains value 0"),
0
);
}
#[test]
fn test_files_api() {
let ue4_crash = get_unreal_crash().expect("test crash file loads");
assert_eq!(ue4_crash.file_count(), 4);
assert_eq!(ue4_crash.files().size_hint(), (4, Some(4)));
assert_eq!(
ue4_crash.file_by_index(0).expect("File exists").name(),
"CrashContext.runtime-xml"
);
assert_eq!(
ue4_crash.file_by_index(1).expect("File exists").name(),
"CrashReportClient.ini"
);
assert_eq!(
ue4_crash.file_by_index(2).expect("File exists").name(),
"MyProject.log"
);
assert_eq!(
ue4_crash.file_by_index(3).expect("File exists").name(),
"UE4Minidump.dmp"
);
let context_file = ue4_crash.file_by_index(0).expect("xml file in pos 0");
let xml = context_file.data();
assert_eq!(xml[0] as char, '<');
// there are two line breaks after closing tag:
assert_eq!(xml[xml.len() - 3] as char, '>');
}
#[test]
fn test_get_logs() {
let ue4_crash = get_unreal_crash().expect("test crash file loads");
let limit = 100;
let logs = ue4_crash.logs(limit).expect("log file");
assert_eq!(logs.len(), limit);
assert_eq!(
logs[1].timestamp.expect("timestamp").to_rfc3339(),
"2018-10-29T16:56:37+00:00"
);
assert_eq!(
logs[0].component.as_ref().expect("component"),
"LogD3D11RHI"
);
assert_eq!(logs[0].message, "Chosen D3D11 Adapter: 0");
assert_eq!(
logs[99].timestamp.expect("timestamp").to_rfc3339(),
"2018-10-29T16:56:38+00:00"
);
assert_eq!(
logs[99].component.as_ref().expect("component"),
"LogWindows"
);
assert_eq!(
logs[99].message,
"Windows GetLastError: The operation completed successfully. (0)"
);
}
| 28.607955 | 99 | 0.65005 |
dd2c561eae81a8d9aa0ff01357dfe66d32c5a73a | 19,656 | /// Contains functions for retrieving the values of some of the DWARF attributes.
pub mod attributes;
/// Contains structs representing the different Rust data types and more.
pub mod evaluate;
use crate::call_stack::MemoryAccess;
use crate::registers::Registers;
use anyhow::{anyhow, Result};
use evaluate::{convert_to_gimli_value, BaseTypeValue, EvaluatorValue};
use gimli::{
AttributeValue::UnitRef,
DebuggingInformationEntry, DieReference, Dwarf, Evaluation, EvaluationResult,
EvaluationResult::{
Complete, RequiresAtLocation, RequiresBaseType, RequiresCallFrameCfa, RequiresEntryValue,
RequiresFrameBase, RequiresIndexedAddress, RequiresMemory, RequiresParameterRef,
RequiresRegister, RequiresRelocatedAddress, RequiresTls,
},
Expression, Reader, Unit, UnitOffset,
};
use log::error;
use std::convert::TryInto;
/// Will find the DIE representing the type so that the variable can be evaluated.
///
/// Description:
///
/// * `dwarf` - A reference to gimli-rs `Dwarf` struct.
/// * `pc` - A machine code address, usually the current code location.
/// * `expr` - The expression to be evaluated.
/// * `frame_base` - The frame base address value.
/// * `unit` - A compilation unit which contains the given DIE.
/// * `die` - The DIE the is used to find the DIE representing the type.
/// * `registers` - A register struct for accessing the register values.
/// * `mem` - A struct for accessing the memory of the debug target.
///
/// This function is used to find the DIE representing the type and then to evaluate the value of
/// the given DIE.
pub fn call_evaluate<R: Reader<Offset = usize>, T: MemoryAccess>(
dwarf: &Dwarf<R>,
pc: u32,
expr: gimli::Expression<R>,
frame_base: Option<u64>,
unit: &Unit<R>,
die: &DebuggingInformationEntry<R>,
registers: &Registers,
mem: &mut T,
) -> Result<EvaluatorValue<R>> {
if let Ok(Some(tattr)) = die.attr_value(gimli::DW_AT_type) {
match tattr {
gimli::AttributeValue::UnitRef(offset) => {
let die = unit.entry(offset)?;
return evaluate(
dwarf,
unit,
pc,
expr,
frame_base,
Some(unit),
Some(&die),
registers,
mem,
);
}
gimli::AttributeValue::DebugInfoRef(di_offset) => {
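                // The type DIE lives in another compilation unit, so scan every unit
                // until one contains the referenced offset.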
let offset = gimli::UnitSectionOffset::DebugInfoOffset(di_offset);
let mut iter = dwarf.debug_info.units();
while let Ok(Some(header)) = iter.next() {
let type_unit = dwarf.unit(header)?;
if let Some(offset) = offset.to_unit_offset(&type_unit) {
let die = type_unit.entry(offset)?;
return evaluate(
dwarf,
unit,
pc,
expr,
frame_base,
Some(&type_unit),
Some(&die),
registers,
mem,
);
}
}
error!("Unreachable");
return Err(anyhow!("Unreachable"));
}
attribute => {
error!("Unimplemented for attribute {:?}", attribute);
return Err(anyhow!("Unimplemented for attribute {:?}", attribute));
}
};
} else if let Ok(Some(die_offset)) = die.attr_value(gimli::DW_AT_abstract_origin) {
match die_offset {
UnitRef(offset) => {
if let Ok(ndie) = unit.entry(offset) {
return call_evaluate(dwarf, pc, expr, frame_base, unit, &ndie, registers, mem);
}
}
_ => {
error!("Unimplemented");
return Err(anyhow!("Unimplemented"));
}
};
}
error!("Unreachable");
return Err(anyhow!("Unreachable"));
}
/// Will evaluate the value of the given DWARF expression.
///
/// Description:
///
/// * `dwarf` - A reference to gimli-rs `Dwarf` struct.
/// * `unit` - A compilation unit which contains the given DIE.
/// * `pc` - A machine code address, usually the current code location.
/// * `expr` - The expression to be evaluated.
/// * `frame_base` - The frame base address value.
/// * `type_unit` - A compilation unit which contains the given DIE which represents the type of
/// the given expression. None if the expression does not have a type.
/// * `type_die` - The DIE that represents the type of the given expression. None if the expression
/// does not have a type.
/// * `registers` - A register struct for accessing the register values.
/// * `mem` - A struct for accessing the memory of the debug target.
///
/// This function will first evaluate the expression into gimli-rs `Piece`s.
/// Then it will use the pieces and the type to evaluate and parse the value.
pub fn evaluate<R: Reader<Offset = usize>, T: MemoryAccess>(
dwarf: &Dwarf<R>,
unit: &Unit<R>,
pc: u32,
expr: Expression<R>,
frame_base: Option<u64>,
type_unit: Option<&gimli::Unit<R>>,
type_die: Option<&gimli::DebuggingInformationEntry<'_, '_, R>>,
registers: &Registers,
mem: &mut T,
) -> Result<EvaluatorValue<R>> {
let pieces = evaluate_pieces(dwarf, unit, pc, expr, frame_base, registers, mem)?;
evaluate_value(dwarf, pieces, type_unit, type_die, registers, mem)
}
/// Will evaluate the value of the given list of gimli-rs `Piece`s.
///
/// Description:
///
/// * `dwarf` - A reference to gimli-rs `Dwarf` struct.
/// * `pieces` - A list of gimli-rs pieces containing the location information.
/// * `type_unit` - A compilation unit which contains the given DIE which represents the type of
/// the given expression. None if the expression does not have a type.
/// * `type_die` - The DIE that represents the type of the given expression. None if the expression
/// does not have a type.
/// * `registers` - A register struct for accessing the register values.
/// * `mem` - A struct for accessing the memory of the debug target.
///
/// Then it will use the pieces and the type to evaluate and parse the value.
pub fn evaluate_value<R: Reader<Offset = usize>, T: MemoryAccess>(
dwarf: &Dwarf<R>,
pieces: Vec<gimli::Piece<R>>,
type_unit: Option<&gimli::Unit<R>>,
type_die: Option<&gimli::DebuggingInformationEntry<'_, '_, R>>,
registers: &Registers,
mem: &mut T,
) -> Result<EvaluatorValue<R>> {
    if let (Some(unit), Some(die)) = (type_unit, type_die) {
        return EvaluatorValue::evaluate_variable_with_type(
            dwarf,
            registers,
            mem,
            &pieces,
            unit.header.offset(),
            die.offset(),
        );
    }
    EvaluatorValue::evaluate_variable(registers, mem, &pieces)
}
/// Evaluates a gimli-rs `Expression` into a `Vec` of `Piece`s.
///
/// Description:
///
/// * `dwarf` - A reference to gimli-rs `Dwarf` struct.
/// * `unit` - A compilation unit which contains the given DIE.
/// * `pc` - A machine code address, usually the current code location.
/// * `expr` - The expression to be evaluated into `Piece`s.
/// * `frame_base` - The frame base address value.
/// * `registers` - A register struct for accessing the register values.
/// * `mem` - A struct for accessing the memory of the debug target.
///
/// This function will evaluate the given expression into a list of pieces.
/// These pieces describe the size and location of the variable the given expression is from.
pub fn evaluate_pieces<R: Reader<Offset = usize>, T: MemoryAccess>(
dwarf: &Dwarf<R>,
unit: &Unit<R>,
pc: u32,
expr: Expression<R>,
frame_base: Option<u64>,
registers: &Registers,
mem: &mut T,
) -> Result<Vec<gimli::Piece<R>>> {
let mut eval = expr.evaluation(unit.encoding());
let mut result = eval.evaluate()?;
loop {
match result {
Complete => break,
RequiresMemory {
address,
size,
                space: _, // Target-specific address space, if any; not needed here.
base_type,
} => match mem.get_address(&(address as u32), size as usize) {
Some(data) => {
let value = eval_base_type(unit, data, base_type)?;
result = eval.resume_with_memory(convert_to_gimli_value(value))?;
}
None => {
return Err(anyhow!("Requires Memory"));
}
},
RequiresRegister {
register,
base_type,
} => match registers.get_register_value(®ister.0) {
Some(data) => {
let bytes = data.to_le_bytes().to_vec();
let value = eval_base_type(unit, bytes, base_type)?;
result = eval.resume_with_register(convert_to_gimli_value(value))?;
}
None => {
return Err(anyhow!("Requires register {}", register.0));
}
},
RequiresFrameBase => {
result = eval.resume_with_frame_base(match frame_base {
Some(val) => val,
None => {
error!("Requires frame base");
return Err(anyhow!("Requires frame base"));
}
})?;
}
RequiresTls(_tls) => {
error!("Unimplemented");
return Err(anyhow!("Unimplemented")); // TODO
}
RequiresCallFrameCfa => {
result = eval.resume_with_call_frame_cfa(
registers.cfa.ok_or(anyhow!("Requires CFA"))? as u64,
)?;
}
RequiresAtLocation(die_ref) => match die_ref {
DieReference::UnitRef(unit_offset) => help_at_location(
dwarf,
unit,
pc,
&mut eval,
&mut result,
frame_base,
unit_offset,
registers,
mem,
)?,
DieReference::DebugInfoRef(debug_info_offset) => {
let unit_header = dwarf.debug_info.header_from_offset(debug_info_offset)?;
if let Some(unit_offset) = debug_info_offset.to_unit_offset(&unit_header) {
let new_unit = dwarf.unit(unit_header)?;
help_at_location(
dwarf,
&new_unit,
pc,
&mut eval,
&mut result,
frame_base,
unit_offset,
registers,
mem,
)?;
} else {
return Err(anyhow!("Could not find at location"));
}
}
},
RequiresEntryValue(entry) => {
let entry_value = evaluate(
dwarf, unit, pc, entry, frame_base, None, None, registers, mem,
)?;
result = eval.resume_with_entry_value(convert_to_gimli_value(match entry_value
.to_value()
{
Some(val) => val,
None => {
error!("Optimized Out");
return Err(anyhow!("Optimized Out"));
}
}))?;
}
RequiresParameterRef(unit_offset) => {
let die = unit.entry(unit_offset)?;
let call_value = match die.attr_value(gimli::DW_AT_call_value)? {
Some(val) => val,
None => {
error!("Could not find required paramter");
return Err(anyhow!("Could not find required parameter"));
}
};
let expr = match call_value.exprloc_value() {
Some(val) => val,
None => {
error!("Could not find required paramter");
return Err(anyhow!("Could not find required parameter"));
}
};
let value = evaluate(
dwarf,
unit,
pc,
expr,
frame_base,
Some(unit),
Some(&die),
registers,
mem,
)?;
if let EvaluatorValue::Value(BaseTypeValue::U64(val), _) = value {
result = eval.resume_with_parameter_ref(val)?;
} else {
error!("Could not find required paramter");
return Err(anyhow!("Could not find required parameter"));
}
}
RequiresRelocatedAddress(_num) => {
error!("Unimplemented");
return Err(anyhow!("Unimplemented"));
// result = eval.resume_with_relocated_address(num)?; // TODO: Check and test if correct.
}
RequiresIndexedAddress {
index: _,
relocate: _,
} => {
// TODO: Check and test if correct. Also handle relocate flag
error!("Unimplemented");
return Err(anyhow!("Unimplemented"));
// result = eval.resume_with_indexed_address(dwarf.address(unit, index)?)?;
}
RequiresBaseType(unit_offset) => {
let die = unit.entry(unit_offset)?;
let mut attrs = die.attrs();
while let Some(attr) = match attrs.next() {
Ok(val) => val,
Err(err) => {
error!("{:?}", err);
return Err(anyhow!("{:?}", err));
}
} {
println!("Attribute name = {:?}", attr.name());
println!("Attribute value = {:?}", attr.value());
}
error!("Unimplemented");
return Err(anyhow!("Unimplemented"));
}
};
}
Ok(eval.result())
}
/// Will parse the value of a `DW_TAG_base_type`.
///
/// Description:
///
/// * `unit` - A compilation unit which contains the type DIE pointed to by the given offset.
/// * `data` - The value to parse in bytes.
/// * `base_type` - An offset into the given compilation unit which points to a DIE with the tag
/// `DW_TAG_base_type`.
///
/// This function will parse the given value into the type given by the offset `base_type`.
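///
/// For example, assuming an unsigned base type of size 4, the little-endian bytes
/// `[0x2A, 0x00, 0x00, 0x00]` parse to the value `42`.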
fn eval_base_type<R>(
unit: &gimli::Unit<R>,
data: Vec<u8>,
base_type: gimli::UnitOffset<usize>,
) -> Result<BaseTypeValue>
where
R: Reader<Offset = usize>,
{
if base_type.0 == 0 {
        // NOTE: length can't be more than one word
let value = match data.len() {
0 => 0,
1 => u8::from_le_bytes(match data.try_into() {
Ok(val) => val,
Err(err) => {
error!("{:?}", err);
return Err(anyhow!("{:?}", err));
}
}) as u64,
2 => u16::from_le_bytes(match data.try_into() {
Ok(val) => val,
Err(err) => {
error!("{:?}", err);
return Err(anyhow!("{:?}", err));
}
}) as u64,
4 => u32::from_le_bytes(match data.try_into() {
Ok(val) => val,
Err(err) => {
error!("{:?}", err);
return Err(anyhow!("{:?}", err));
}
}) as u64,
8 => u64::from_le_bytes(match data.try_into() {
Ok(val) => val,
Err(err) => {
error!("{:?}", err);
return Err(anyhow!("{:?}", err));
}
}),
_ => {
error!("Unreachable");
return Err(anyhow!("Unreachable"));
}
};
return Ok(BaseTypeValue::Generic(value));
}
let die = unit.entry(base_type)?;
    // The DIE at a base type offset must have the DW_TAG_base_type tag.
    if die.tag() != gimli::DW_TAG_base_type {
        error!("Requires that the die has tag DW_TAG_base_type");
        return Err(anyhow!("Requires that the die has tag DW_TAG_base_type"));
    }
let encoding = match die.attr_value(gimli::DW_AT_encoding)? {
Some(gimli::AttributeValue::Encoding(dwate)) => dwate,
_ => {
error!("Expected base type die to have attribute DW_AT_encoding");
return Err(anyhow!(
"Expected base type die to have attribute DW_AT_encoding"
));
}
};
BaseTypeValue::parse_base_type(data, encoding)
}
/// Will evaluate a value that is required when evaluating an expression into pieces.
///
/// Description:
///
/// * `dwarf` - A reference to gimli-rs `Dwarf` struct.
/// * `unit` - A compilation unit which contains the given DIE.
/// * `pc` - A machine code address, usually the current code location.
/// * `eval` - A gimli-rs `Evaluation` that will be continued with the new value.
/// * `result` - A gimli-rs `EvaluationResult` that will be updated with the new evaluation result.
/// * `frame_base` - The frame base address value.
/// * `unit_offset` - An offset to the DIE that will be evaluated and added to the given `Evaluation` struct.
/// * `registers` - A register struct for accessing the register values.
/// * `mem` - A struct for accessing the memory of the debug target.
///
/// This function is a helper function for continuing a `Piece` evaluation where another value
/// needs to be evaluated first.
fn help_at_location<R: Reader<Offset = usize>, T: MemoryAccess>(
dwarf: &Dwarf<R>,
unit: &Unit<R>,
pc: u32,
eval: &mut Evaluation<R>,
result: &mut EvaluationResult<R>,
frame_base: Option<u64>,
unit_offset: UnitOffset<usize>,
registers: &Registers,
mem: &mut T,
) -> Result<()>
where
R: Reader<Offset = usize>,
{
let die = unit.entry(unit_offset)?;
let location = match die.attr_value(gimli::DW_AT_location)? {
Some(val) => val,
None => {
error!("Could not find location attribute");
return Err(anyhow!("Could not find location attribute"));
}
};
if let Some(expr) = location.exprloc_value() {
let val = call_evaluate(dwarf, pc, expr, frame_base, &unit, &die, registers, mem)?;
if let EvaluatorValue::Bytes(b) = val {
*result = eval.resume_with_at_location(b)?;
return Ok(());
} else {
error!("Error expected bytes");
return Err(anyhow!("Error expected bytes"));
}
} else {
error!("die has no at location");
return Err(anyhow!("die has no at location"));
}
}
| 37.655172 | 120 | 0.517145 |
fe86a610d41e42c310e92cca6347321016b0662c | 6,892 | use bevy::{prelude::*, type_registry::TypeRegistry};
/// This example illustrates loading and saving scenes from files
fn main() {
App::build()
.add_default_plugins()
// Registering components informs Bevy that they exist. This allows them to be used when loading scenes
// This step is only required if you want to load your components from scene files.
// Unregistered components can still be used in your code, but they will be ignored during scene save/load.
// In the future registering components will also make them usable from the Bevy editor.
// The core Bevy plugins already register their components, so you only need this step for custom components.
.register_component::<ComponentA>()
.register_component::<ComponentB>()
.add_startup_system(save_scene_system.thread_local_system())
.add_startup_system(load_scene_system.system())
.add_startup_system(infotext_system.system())
.add_system(print_system.system())
.run();
}
// Registered components must implement the `Properties` and `FromResources` traits.
// The `Properties` trait enables serialization, deserialization, dynamic property access, and change detection.
// `Properties` enable a bunch of cool behaviors, so its worth checking out the dedicated `properties.rs` example.
// The `FromResources` trait determines how your component is constructed when it loads. For simple use cases you can just
// implement the `Default` trait (which automatically implements FromResources). The simplest registered component just needs
// these two derives:
#[derive(Properties, Default)]
struct ComponentA {
pub x: f32,
pub y: f32,
}
// Some components have fields that cannot (or should not) be written to scene files. These can be ignored with
// the #[property(ignore)] attribute. This is also generally where the `FromResources` trait comes into play.
// `FromResources` gives you access to your App's current ECS `Resources` when you construct your component.
#[derive(Properties)]
struct ComponentB {
pub value: String,
#[property(ignore)]
pub time_since_startup: std::time::Duration,
}
impl FromResources for ComponentB {
fn from_resources(resources: &Resources) -> Self {
let time = resources.get::<Time>().unwrap();
ComponentB {
time_since_startup: time.time_since_startup(),
value: "Default Value".to_string(),
}
}
}
fn load_scene_system(asset_server: Res<AssetServer>, mut scene_spawner: ResMut<SceneSpawner>) {
// Scenes are loaded just like any other asset.
let scene_handle: Handle<Scene> = asset_server
.load("assets/scenes/load_scene_example.scn")
.unwrap();
// SceneSpawner can "instance" scenes. "instancing" a scene creates a new instance of the scene in the World with new entity ids.
// This guarantees that it will not overwrite existing entities.
scene_spawner.instance(scene_handle);
// SceneSpawner can also "load" scenes. "loading" a scene preserves the entity ids in the scene.
// In general, you should "instance" scenes when you are dynamically composing your World and "load" scenes for things like game saves.
scene_spawner.load(scene_handle);
// we have now loaded `scene_handle` AND instanced it, which means our World now has one set of entities with the Scene's ids and
// one set of entities with new ids
// This tells the AssetServer to watch for changes to assets.
// It enables our scenes to automatically reload in game when we modify their files
asset_server.watch_for_changes().unwrap();
}
// Using SceneSpawner instance() and load() queues them up to be added to the World at the beginning of the next update. However if
// you need scenes to load immediately, you can use the following approach. But be aware that this takes full control of the ECS world
// and therefore blocks other parallel systems from executing until it finishes. In most cases you should use the SceneSpawner
// instance() and load() methods.
#[allow(dead_code)]
fn load_scene_right_now_system(world: &mut World, resources: &mut Resources) {
let scene_handle: Handle<Scene> = {
let asset_server = resources.get::<AssetServer>().unwrap();
let mut scenes = resources.get_mut::<Assets<Scene>>().unwrap();
asset_server
.load_sync(&mut scenes, "assets/scenes/load_scene_example.scn")
.unwrap()
};
let mut scene_spawner = resources.get_mut::<SceneSpawner>().unwrap();
scene_spawner
.load_sync(world, resources, scene_handle)
.unwrap();
}
// This system prints all ComponentA components in our world. Try making a change to a ComponentA in load_scene_example.scn.
// You should immediately see the changes appear in the console.
fn print_system(mut query: Query<(Entity, Changed<ComponentA>)>) {
for (entity, component_a) in &mut query.iter() {
println!(" Entity({})", entity.id());
println!(
" ComponentA: {{ x: {} y: {} }}\n",
component_a.x, component_a.y
);
}
}
fn save_scene_system(_world: &mut World, resources: &mut Resources) {
// Scenes can be created from any ECS World. You can either create a new one for the scene or use the current World.
let mut world = World::new();
world.spawn((
ComponentA { x: 1.0, y: 2.0 },
ComponentB {
value: "hello".to_string(),
..ComponentB::from_resources(resources)
},
));
world.spawn((ComponentA { x: 3.0, y: 4.0 },));
// The component registry resource contains information about all registered components. This is used to construct scenes.
let type_registry = resources.get::<TypeRegistry>().unwrap();
let scene = Scene::from_world(&world, &type_registry.component.read());
// Scenes can be serialized like this:
println!(
"{}",
scene.serialize_ron(&type_registry.property.read()).unwrap()
);
// TODO: save scene
}
// This is only necessary for the info message in the UI. See examples/ui/text.rs for a standalone text example.
fn infotext_system(mut commands: Commands, asset_server: Res<AssetServer>) {
let font_handle = asset_server.load("assets/fonts/FiraSans-Bold.ttf").unwrap();
commands
.spawn(UiCameraComponents::default())
.spawn(TextComponents {
style: Style {
align_self: AlignSelf::FlexEnd,
..Default::default()
},
text: Text {
value: "Nothing to see in this window! Check the console output!".to_string(),
font: font_handle,
style: TextStyle {
font_size: 50.0,
color: Color::WHITE,
},
},
..Default::default()
});
}
| 45.342105 | 139 | 0.679193 |
f78100d176e4bc19b9c2b862e68205f3c474818c | 2,951 | use super::scene::*;
use super::record::*;
pub(crate) enum Tab {
AbilityCfg,
LimitRuleCfg,
AbilityEffectCfg,
AbilityEffectTypeCfg,
FightBuffCfg,
FightBuffEffectCfg,
TargetRuleCfg,
FightPointCfg,
FightCfg,
FightTypeCfg,
FightRepressTypeCfg,
FightStarRepressCfg,
FightLevelRepressCfg,
}
pub(crate) enum Cfg {
None,
Ability(AbilityCfg),
LimitRuleCfg(LimitRuleCfg),
AbilityEffectCfg(AbilityEffectCfg),
FightBuffCfg(FightBuffCfg),
FightBuffEffectCfg(FightBuffEffectCfg),
TargetRuleCfg(TargetRuleCfg),
FightPointCfg(FightPointCfg),
FightCfg(FightCfg),
FightTypeCfg(FightTypeCfg),
FightRepressTypeCfg(FightRepressTypeCfg),
FightStarRepressCfg(FightStarRepressCfg),
FightLevelRepressCfg(FightLevelRepressCfg),
}
impl Cfg {
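    // Each accessor below panics with "invalid cfg" when `self` holds a different variant.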
    pub(crate) fn ability(&self) -> &AbilityCfg {
        if let Cfg::Ability(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
    pub(crate) fn limit_rule(&self) -> &LimitRuleCfg {
        if let Cfg::LimitRuleCfg(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
    pub(crate) fn ability_effect(&self) -> &AbilityEffectCfg {
        if let Cfg::AbilityEffectCfg(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
    pub(crate) fn fight_buff_cfg(&self) -> &FightBuffCfg {
        if let Cfg::FightBuffCfg(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
    pub(crate) fn fight_buff_effect_cfg(&self) -> &FightBuffEffectCfg {
        if let Cfg::FightBuffEffectCfg(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
    pub(crate) fn target_rule_cfg(&self) -> &TargetRuleCfg {
        if let Cfg::TargetRuleCfg(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
    pub(crate) fn fight_point_cfg(&self) -> &FightPointCfg {
        if let Cfg::FightPointCfg(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
    pub(crate) fn fight_cfg(&self) -> &FightCfg {
        if let Cfg::FightCfg(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
    pub(crate) fn fight_type_cfg(&self) -> &FightTypeCfg {
        if let Cfg::FightTypeCfg(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
    pub(crate) fn fight_repress_type_cfg(&self) -> &FightRepressTypeCfg {
        if let Cfg::FightRepressTypeCfg(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
    pub(crate) fn fight_star_repress_cfg(&self) -> &FightStarRepressCfg {
        if let Cfg::FightStarRepressCfg(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
    pub(crate) fn fight_level_repress_cfg(&self) -> &FightLevelRepressCfg {
        if let Cfg::FightLevelRepressCfg(cfg) = self { cfg } else { panic!("invalid cfg") }
    }
pub(crate) fn fight_level_repress_cfg(&self) -> &FightLevelRepressCfg {
if let &Cfg::FightLevelRepressCfg(cfg) = &self { cfg } else { panic!("invalid cfg") }
}
}
pub(crate) fn tb_get(_tab: Tab, _tid: i64) -> Box<Cfg> {
return Box::new(Cfg::None);
}
pub(crate) fn tb_all(_tab: Tab) -> Vec<Box<Cfg>> {
return vec![];
} | 36.432099 | 94 | 0.622162 |
676caae9fad38c3acea6c492284cbe4372556f54 | 7,607 | use std::{collections::HashMap, convert::TryFrom};
use anomaly::BoxError;
use tendermint_rpc::event::{Event as RpcEvent, EventData as RpcEventData};
use ibc::ics02_client::events::NewBlock;
use ibc::ics02_client::height::Height;
use ibc::ics24_host::identifier::ChainId;
use ibc::{
events::{IbcEvent, RawObject},
ics02_client::events as ClientEvents,
ics03_connection::events as ConnectionEvents,
ics04_channel::events as ChannelEvents,
};
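/// Extracts all IBC events carried by an RPC event, pairing each with the height at
/// which it occurred.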
pub fn get_all_events(
chain_id: &ChainId,
result: RpcEvent,
) -> Result<Vec<(Height, IbcEvent)>, String> {
let mut vals: Vec<(Height, IbcEvent)> = vec![];
match &result.data {
RpcEventData::NewBlock { block, .. } => {
let height = Height::new(
ChainId::chain_version(chain_id.to_string().as_str()),
u64::from(block.as_ref().ok_or("tx.height")?.header.height),
);
vals.push((height, NewBlock::new(height).into()));
}
RpcEventData::Tx { .. } => {
let events = &result.events.ok_or("missing events")?;
let height_raw = events.get("tx.height").ok_or("tx.height")?[0]
.parse::<u64>()
.map_err(|e| e.to_string())?;
let height = Height::new(
ChainId::chain_version(chain_id.to_string().as_str()),
height_raw,
);
let actions_and_indices = extract_helper(events)?;
for action in actions_and_indices {
if let Ok(event) = build_event(RawObject::new(
height,
action.0,
action.1 as usize,
events.clone(),
)) {
vals.push((height, event));
}
}
}
_ => {}
}
Ok(vals)
}
pub fn build_event(mut object: RawObject) -> Result<IbcEvent, BoxError> {
match object.action.as_str() {
// Client events
"create_client" => Ok(IbcEvent::from(ClientEvents::CreateClient::try_from(
object,
)?)),
"update_client" => Ok(IbcEvent::from(ClientEvents::UpdateClient::try_from(
object,
)?)),
"submit_misbehaviour" => Ok(IbcEvent::from(ClientEvents::ClientMisbehaviour::try_from(
object,
)?)),
// Connection events
"connection_open_init" => Ok(IbcEvent::from(ConnectionEvents::OpenInit::try_from(
object,
)?)),
"connection_open_try" => Ok(IbcEvent::from(ConnectionEvents::OpenTry::try_from(object)?)),
"connection_open_ack" => Ok(IbcEvent::from(ConnectionEvents::OpenAck::try_from(object)?)),
"connection_open_confirm" => Ok(IbcEvent::from(ConnectionEvents::OpenConfirm::try_from(
object,
)?)),
// Channel events
"channel_open_init" => Ok(IbcEvent::from(ChannelEvents::OpenInit::try_from(object)?)),
"channel_open_try" => Ok(IbcEvent::from(ChannelEvents::OpenTry::try_from(object)?)),
"channel_open_ack" => Ok(IbcEvent::from(ChannelEvents::OpenAck::try_from(object)?)),
"channel_open_confirm" => Ok(IbcEvent::from(ChannelEvents::OpenConfirm::try_from(
object,
)?)),
"channel_close_init" => Ok(IbcEvent::from(ChannelEvents::CloseInit::try_from(object)?)),
"channel_close_confirm" => Ok(IbcEvent::from(ChannelEvents::CloseConfirm::try_from(
object,
)?)),
// Packet events
// Note: There is no message.action "send_packet", the only one we can hook into is the
// module's action:
// - "transfer" for ICS20
// - "register" and "send" for ICS27
        // However, the attributes are all prefixed with "send_packet", hence the overwrite here.
        // TODO: This needs to be sorted out
"transfer" | "register" | "send" => {
object.action = "send_packet".to_string();
Ok(IbcEvent::from(ChannelEvents::SendPacket::try_from(object)?))
}
// Same here
// TODO: sort this out
"recv_packet" => {
object.action = "write_acknowledgement".to_string();
Ok(IbcEvent::from(
ChannelEvents::WriteAcknowledgement::try_from(object)?,
))
}
"write_acknowledgement" => Ok(IbcEvent::from(
ChannelEvents::WriteAcknowledgement::try_from(object)?,
)),
"acknowledge_packet" => Ok(IbcEvent::from(ChannelEvents::AcknowledgePacket::try_from(
object,
)?)),
"timeout_packet" => Ok(IbcEvent::from(ChannelEvents::TimeoutPacket::try_from(
object,
)?)),
"timeout_on_close_packet" => {
object.action = "timeout_packet".to_string();
Ok(IbcEvent::from(
ChannelEvents::TimeoutOnClosePacket::try_from(object)?,
))
}
event_type => Err(format!("Incorrect event type: '{}'", event_type).into()),
}
}
/// Takes events in the form
///
/// ```json
/// {
/// "events": {
/// "connection_open_init.client_id": [
/// "testclient",
/// "testclientsec"
/// ],
/// "connection_open_init.connection_id": [
/// "ancaconnonetest",
/// "ancaconnonetestsec"
/// ],
/// "connection_open_init.counterparty_client_id": [
/// "testclientsec",
/// "testclientsecsec"
/// ],
/// "create_client.client_id": [
/// "testclientthird"
/// ],
/// "create_client.client_type": [
/// "tendermint"
/// ],
/// "message.action": [
/// "connection_open_init",
/// "create_client",
/// "connection_open_init"
/// ],
/// "message.module": [
/// "ibc_connection",
/// "ibc_client",
/// "ibc_connection"
/// ],
/// "message.sender": [
/// "cosmos187xxg4yfkypl05cqylucezpjvycj24nurvm8p9",
/// "cosmos187xxg4yfkypl05cqylucezpjvycj24nurvm8p9",
/// "cosmos187xxg4yfkypl05cqylucezpjvycj24nurvm8p9",
/// "cosmos187xxg4yfkypl05cqylucezpjvycj24nurvm8p9"
/// ],
/// "tm.event": [
/// "Tx"
/// ],
/// "transfer.amount": [
/// "5000stake"
/// ],
/// "transfer.recipient": [
/// "cosmos17xpfvakm2amg962yls6f84z3kell8c5lserqta"
/// ],
/// "tx.hash": [
/// "A9E18AE3909F22232F8DBDB1C48F2FECB260A308A2D157E8832E901D45950605"
/// ],
/// "tx.height": [
/// "35"
/// ]
/// }
/// }
/// ```
///
/// and returns:
///
/// ```rust
/// vec![
/// ("connection_open_init", 0),
/// ("create_client", 0),
/// ("connection_open_init", 1),
/// ];
/// ```
///
/// where the number in each entry is the index in the matching events that should be used to build the event.
///
/// e.g. for the last "connection_open_init" in the result
///
/// ```text
/// "connection_open_init.client_id" -> "testclientsec"
/// "connection_open_init.connection_id" -> "ancaconnonetestsec",
/// "connection_open_init.counterparty_client_id" -> "testclientsec", "testclientsecsec",
/// ```
fn extract_helper(events: &HashMap<String, Vec<String>>) -> Result<Vec<(String, u32)>, String> {
let actions = events.get("message.action").ok_or("Incorrect Event Type")?;
let mut val_indices = HashMap::new();
let mut result = Vec::with_capacity(actions.len());
for action in actions {
let idx = val_indices.entry(action.clone()).or_insert_with(|| 0);
result.push((action.clone(), *idx));
*val_indices.get_mut(action.as_str()).unwrap() += 1;
}
Ok(result)
}
| 33.511013 | 110 | 0.572105 |
ac731d772e44d619d76f3d456584b26474a6c62c | 154 | use crate::common::*;
#[derive(Deserialize)]
#[serde(deny_unknown_fields)]
pub(crate) struct Link {
pub(crate) text: String,
pub(crate) url: Url,
}
| 17.111111 | 29 | 0.681818 |
fb6552866610f22407a3f5ff5efb4eec59f38b1d | 13,362 | //! Connection configuration.
//!
//! Requires the `runtime` Cargo feature (enabled by default).
use crate::connection::Connection;
use crate::Client;
use log::info;
use std::fmt;
use std::path::Path;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use tokio::runtime;
#[doc(inline)]
pub use tokio_postgres::config::{ChannelBinding, Host, SslMode, TargetSessionAttrs};
use tokio_postgres::error::DbError;
use tokio_postgres::tls::{MakeTlsConnect, TlsConnect};
use tokio_postgres::{Error, Socket};
/// Connection configuration.
///
/// Configuration can be parsed from libpq-style connection strings. These strings come in two formats:
///
/// # Key-Value
///
/// This format consists of space-separated key-value pairs. Values which are either the empty string or contain
/// whitespace should be wrapped in `'`. `'` and `\` characters should be backslash-escaped.
///
/// ## Keys
///
/// * `user` - The username to authenticate with. Required.
/// * `password` - The password to authenticate with.
/// * `dbname` - The name of the database to connect to. Defaults to the username.
/// * `options` - Command line options used to configure the server.
/// * `application_name` - Sets the `application_name` parameter on the server.
/// * `sslmode` - Controls usage of TLS. If set to `disable`, TLS will not be used. If set to `prefer`, TLS will be used
/// if available, but not used otherwise. If set to `require`, TLS will be forced to be used. Defaults to `prefer`.
/// * `host` - The host to connect to. On Unix platforms, if the host starts with a `/` character it is treated as the
/// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts
/// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting
/// with the `connect` method.
/// * `port` - The port to connect to. Multiple ports can be specified, separated by commas. The number of ports must be
/// either 1, in which case it will be used for all hosts, or the same as the number of hosts. Defaults to 5432 if
/// omitted or the empty string.
/// * `connect_timeout` - The time limit in seconds applied to each socket-level connection attempt. Note that hostnames
/// can resolve to multiple IP addresses, and this limit is applied to each address. Defaults to no timeout.
/// * `keepalives` - Controls the use of TCP keepalive. A value of 0 disables keepalive and nonzero integers enable it.
/// This option is ignored when connecting with Unix sockets. Defaults to on.
/// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server.
/// This option is ignored when connecting with Unix sockets. Defaults to 2 hours.
/// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that
/// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server
/// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`.
///
/// ## Examples
///
/// ```not_rust
/// host=localhost user=postgres connect_timeout=10 keepalives=0
/// ```
///
/// ```not_rust
/// host=/var/run/postgresql,localhost port=1234 user=postgres password='password with spaces'
/// ```
///
/// ```not_rust
/// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write
/// ```
///
/// # Url
///
/// This format resembles a URL with a scheme of either `postgres://` or `postgresql://`. All components are optional,
/// and the format accept query parameters for all of the key-value pairs described in the section above. Multiple
/// host/port pairs can be comma-separated. Unix socket paths in the host section of the URL should be percent-encoded,
/// as the path component of the URL specifies the database name.
///
/// ## Examples
///
/// ```not_rust
/// postgresql://user@localhost
/// ```
///
/// ```not_rust
/// postgresql://user:password@%2Fvar%2Frun%2Fpostgresql/mydb?connect_timeout=10
/// ```
///
/// ```not_rust
/// postgresql://user@host1:1234,host2,host3:5678?target_session_attrs=read-write
/// ```
///
/// ```not_rust
/// postgresql:///mydb?user=user&host=/var/run/postgresql
/// ```
#[derive(Clone)]
pub struct Config {
config: tokio_postgres::Config,
notice_callback: Arc<dyn Fn(DbError) + Send + Sync>,
}
impl fmt::Debug for Config {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Config")
.field("config", &self.config)
.finish()
}
}
impl Default for Config {
fn default() -> Config {
Config::new()
}
}
impl Config {
/// Creates a new configuration.
pub fn new() -> Config {
tokio_postgres::Config::new().into()
}
/// Sets the user to authenticate with.
///
/// Required.
pub fn user(&mut self, user: &str) -> &mut Config {
self.config.user(user);
self
}
/// Gets the user to authenticate with, if one has been configured with
/// the `user` method.
pub fn get_user(&self) -> Option<&str> {
self.config.get_user()
}
/// Sets the password to authenticate with.
pub fn password<T>(&mut self, password: T) -> &mut Config
where
T: AsRef<[u8]>,
{
self.config.password(password);
self
}
/// Gets the password to authenticate with, if one has been configured with
/// the `password` method.
pub fn get_password(&self) -> Option<&[u8]> {
self.config.get_password()
}
/// Sets the name of the database to connect to.
///
/// Defaults to the user.
pub fn dbname(&mut self, dbname: &str) -> &mut Config {
self.config.dbname(dbname);
self
}
/// Gets the name of the database to connect to, if one has been configured
/// with the `dbname` method.
pub fn get_dbname(&self) -> Option<&str> {
self.config.get_dbname()
}
/// Sets command line options used to configure the server.
pub fn options(&mut self, options: &str) -> &mut Config {
self.config.options(options);
self
}
/// Gets the command line options used to configure the server, if the
/// options have been set with the `options` method.
pub fn get_options(&self) -> Option<&str> {
self.config.get_options()
}
/// Sets the value of the `application_name` runtime parameter.
pub fn application_name(&mut self, application_name: &str) -> &mut Config {
self.config.application_name(application_name);
self
}
/// Gets the value of the `application_name` runtime parameter, if it has
/// been set with the `application_name` method.
pub fn get_application_name(&self) -> Option<&str> {
self.config.get_application_name()
}
    /// Sets the remote option.
pub fn remote(&mut self, remote: &str) -> &mut Config {
self.config.remote(remote);
self
}
    /// Gets the value of the remote option.
pub fn get_remote(&self) -> Option<&str> {
self.config.get_remote()
}
/// Sets the SSL configuration.
///
/// Defaults to `prefer`.
pub fn ssl_mode(&mut self, ssl_mode: SslMode) -> &mut Config {
self.config.ssl_mode(ssl_mode);
self
}
/// Gets the SSL configuration.
pub fn get_ssl_mode(&self) -> SslMode {
self.config.get_ssl_mode()
}
/// Adds a host to the configuration.
///
/// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix
/// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets.
pub fn host(&mut self, host: &str) -> &mut Config {
self.config.host(host);
self
}
/// Gets the hosts that have been added to the configuration with `host`.
pub fn get_hosts(&self) -> &[Host] {
self.config.get_hosts()
}
/// Adds a Unix socket host to the configuration.
///
/// Unlike `host`, this method allows non-UTF8 paths.
#[cfg(unix)]
pub fn host_path<T>(&mut self, host: T) -> &mut Config
where
T: AsRef<Path>,
{
self.config.host_path(host);
self
}
/// Adds a port to the configuration.
///
/// Multiple ports can be specified by calling this method multiple times. There must either be no ports, in which
/// case the default of 5432 is used, a single port, in which it is used for all hosts, or the same number of ports
/// as hosts.
pub fn port(&mut self, port: u16) -> &mut Config {
self.config.port(port);
self
}
/// Gets the ports that have been added to the configuration with `port`.
pub fn get_ports(&self) -> &[u16] {
self.config.get_ports()
}
/// Sets the timeout applied to socket-level connection attempts.
///
/// Note that hostnames can resolve to multiple IP addresses, and this timeout will apply to each address of each
/// host separately. Defaults to no limit.
pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config {
self.config.connect_timeout(connect_timeout);
self
}
/// Gets the connection timeout, if one has been set with the
/// `connect_timeout` method.
pub fn get_connect_timeout(&self) -> Option<&Duration> {
self.config.get_connect_timeout()
}
/// Controls the use of TCP keepalive.
///
/// This is ignored for Unix domain socket connections. Defaults to `true`.
pub fn keepalives(&mut self, keepalives: bool) -> &mut Config {
self.config.keepalives(keepalives);
self
}
/// Reports whether TCP keepalives will be used.
pub fn get_keepalives(&self) -> bool {
self.config.get_keepalives()
}
/// Sets the amount of idle time before a keepalive packet is sent on the connection.
///
/// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. Defaults to 2 hours.
pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config {
self.config.keepalives_idle(keepalives_idle);
self
}
/// Gets the configured amount of idle time before a keepalive packet will
/// be sent on the connection.
pub fn get_keepalives_idle(&self) -> Duration {
self.config.get_keepalives_idle()
}
/// Sets the requirements of the session.
///
/// This can be used to connect to the primary server in a clustered database rather than one of the read-only
/// secondary servers. Defaults to `Any`.
pub fn target_session_attrs(
&mut self,
target_session_attrs: TargetSessionAttrs,
) -> &mut Config {
self.config.target_session_attrs(target_session_attrs);
self
}
/// Gets the requirements of the session.
pub fn get_target_session_attrs(&self) -> TargetSessionAttrs {
self.config.get_target_session_attrs()
}
/// Sets the channel binding behavior.
///
/// Defaults to `prefer`.
pub fn channel_binding(&mut self, channel_binding: ChannelBinding) -> &mut Config {
self.config.channel_binding(channel_binding);
self
}
/// Gets the channel binding behavior.
pub fn get_channel_binding(&self) -> ChannelBinding {
self.config.get_channel_binding()
}
/// Sets the notice callback.
///
/// This callback will be invoked with the contents of every
/// [`AsyncMessage::Notice`] that is received by the connection. Notices use
    /// the same structure as errors, but they are not "errors" per se.
///
/// Notices are distinct from notifications, which are instead accessible
/// via the [`Notifications`] API.
///
/// [`AsyncMessage::Notice`]: tokio_postgres::AsyncMessage::Notice
/// [`Notifications`]: crate::Notifications
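    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming a `Config::new()` constructor as in the
    /// upstream `postgres` crate):
    ///
    /// ```ignore
    /// let mut config = Config::new();
    /// config.notice_callback(|notice| eprintln!("{}", notice.message()));
    /// ```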
pub fn notice_callback<F>(&mut self, f: F) -> &mut Config
where
F: Fn(DbError) + Send + Sync + 'static,
{
self.notice_callback = Arc::new(f);
self
}
/// Opens a connection to a PostgreSQL database.
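    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a `Config::new()` constructor and a `NoTls`
    /// connector re-export as in the upstream `postgres` crate:
    ///
    /// ```ignore
    /// let client = Config::new()
    ///     .host("localhost")
    ///     .port(5432)
    ///     .connect(NoTls)?;
    /// ```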
pub fn connect<T>(&self, tls: T) -> Result<Client, Error>
where
T: MakeTlsConnect<Socket> + 'static + Send,
T::TlsConnect: Send,
T::Stream: Send,
<T::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
let runtime = runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap(); // FIXME don't unwrap
let (client, connection) = runtime.block_on(self.config.connect(tls))?;
let connection = Connection::new(runtime, connection, self.notice_callback.clone());
Ok(Client::new(connection, client))
}
}
impl FromStr for Config {
type Err = Error;
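    /// Parses a libpq-style connection string by delegating to
    /// `tokio_postgres::Config`, which accepts both the key-value format
    /// (`host=localhost user=postgres`) and the URL format
    /// (`postgresql://postgres@localhost/mydb`).
    ///
    /// A minimal sketch (the surrounding error handling is illustrative):
    ///
    /// ```ignore
    /// let config: Config = "host=localhost user=postgres".parse()?;
    /// ```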
fn from_str(s: &str) -> Result<Config, Error> {
s.parse::<tokio_postgres::Config>().map(Config::from)
}
}
impl From<tokio_postgres::Config> for Config {
fn from(config: tokio_postgres::Config) -> Config {
Config {
config,
notice_callback: Arc::new(|notice| {
info!("{}: {}", notice.severity(), notice.message())
}),
}
}
}
| 35.163158 | 120 | 0.646086 |
d5de0610ac58ccac071f20f38173b1d1698e1a85 | 1,481 | // Copyright Rivtower Technologies LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde_derive::Deserialize;
#[derive(Debug, Deserialize, Clone)]
pub struct NetConfig {
pub enable_tls: bool,
pub port: u16,
pub peers: Vec<PeerConfig>,
}
#[derive(Debug, Deserialize, Clone)]
pub struct PeerConfig {
pub ip: String,
pub port: u16,
}
impl NetConfig {
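    /// Parses a TOML configuration string into a `NetConfig`, panicking if the
    /// string is not valid TOML for this structure.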
pub fn new(config_str: &str) -> Self {
toml::from_str::<NetConfig>(config_str).expect("Error while parsing config")
}
}
#[cfg(test)]
mod tests {
use super::NetConfig;
#[test]
fn basic_test() {
let toml_str = r#"
enable_tls = false
port = 40000
[[peers]]
ip = "127.0.0.1"
port = 40001
[[peers]]
ip = "127.0.0.1"
port = 40002
"#;
let config = NetConfig::new(toml_str);
assert_eq!(config.port, 40000);
assert_eq!(config.peers.len(), 2);
}
}
| 25.101695 | 84 | 0.632681 |
260ed5802ef5bb1f00a5d919a7fb792e5d163024 | 171 | use gdnative::prelude::*;
#[derive(NativeClass)]
#[inherit(Node)]
struct Foo {}
#[methods]
impl Foo {
fn new(_owner: &Node) -> Self {
Foo {}
}
}
fn main() {}
| 11.4 | 35 | 0.54386 |
b9989e366f9cb9c5f41cca1144cce0ad6cba28d4 | 3,334 | use crate::sys::{
CoCreateInstance, CoGetClassObject, CoInitializeEx, CoUninitialize, CLSCTX_INPROC_SERVER,
COINIT_APARTMENTTHREADED, FAILED, HRESULT, IID, S_FALSE, S_OK,
};
use std::ffi::c_void;
use crate::{
interfaces::iclass_factory::{IClassFactory, IID_ICLASS_FACTORY},
CoClass, ComInterface, ComPtr, ComRc,
};
pub struct ApartmentThreadedRuntime {
_not_send: *const (),
}
impl ApartmentThreadedRuntime {
pub fn new() -> Result<ApartmentThreadedRuntime, HRESULT> {
// Attempt to initialize the runtime first. `CoUninitialize` should be called only if this
// is successful. Since the `CoUninitialize` call is made through the `Drop` implementation
// of `ApartmentThreadedRuntime`, we need to be careful to not instantiate the runtime in
// case the `CoInitializeEx` fails.
//
// https://docs.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-couninitialize
unsafe {
match CoInitializeEx(std::ptr::null_mut::<c_void>(), COINIT_APARTMENTTHREADED) {
// S_OK indicates the runtime was initialized, S_FALSE means it was initialized
// previously. In both cases we need to invoke `CoUninitialize` later.
S_OK | S_FALSE => Ok(ApartmentThreadedRuntime {
_not_send: std::ptr::null(),
}),
// Any other result is considered an error here.
hr => Err(hr),
}
}
}
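    /// Retrieves the registered class factory for the class identified by `iid`
    /// from an in-process server (`CLSCTX_INPROC_SERVER`).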
pub fn get_class_object(&self, iid: &IID) -> Result<ComRc<dyn IClassFactory>, HRESULT> {
let mut class_factory = std::ptr::null_mut::<c_void>();
let hr = unsafe {
CoGetClassObject(
iid as *const IID,
CLSCTX_INPROC_SERVER,
std::ptr::null_mut::<c_void>(),
&IID_ICLASS_FACTORY as *const IID,
&mut class_factory as *mut *mut c_void,
)
};
if FAILED(hr) {
return Err(hr);
}
Ok(unsafe { ComRc::from_raw(class_factory as *mut *mut _) })
}
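    /// Creates a new, non-aggregated COM instance of the class identified by
    /// `clsid`, exposed through the interface `T`.
    ///
    /// A minimal sketch (the `ICat`/`CLSID_CAT` names are illustrative, not part
    /// of this crate):
    ///
    /// ```ignore
    /// let runtime = ApartmentThreadedRuntime::new()?;
    /// let cat = runtime.create_instance::<dyn ICat>(&CLSID_CAT)?;
    /// ```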
pub fn create_instance<T: ComInterface + ?Sized>(
&self,
clsid: &IID,
) -> Result<ComRc<T>, HRESULT> {
unsafe {
Ok(ComRc::new(
self.create_raw_instance::<T>(clsid, std::ptr::null_mut())?,
))
}
}
pub fn create_aggregated_instance<T: ComInterface + ?Sized, U: CoClass>(
&self,
clsid: &IID,
outer: &mut U,
) -> Result<ComPtr<T>, HRESULT> {
unsafe { self.create_raw_instance::<T>(clsid, outer as *mut U as *mut c_void) }
}
pub unsafe fn create_raw_instance<T: ComInterface + ?Sized>(
&self,
clsid: &IID,
outer: *mut c_void,
) -> Result<ComPtr<T>, HRESULT> {
let mut instance = std::ptr::null_mut::<c_void>();
let hr = CoCreateInstance(
clsid as *const IID,
outer,
CLSCTX_INPROC_SERVER,
&T::IID as *const IID,
&mut instance as *mut *mut c_void,
);
if FAILED(hr) {
return Err(hr);
}
Ok(ComPtr::new(instance as *mut _))
}
}
impl std::ops::Drop for ApartmentThreadedRuntime {
fn drop(&mut self) {
unsafe { CoUninitialize() }
}
}
| 33.009901 | 101 | 0.573485 |
d953c7b513f66448edf23c8d7e24af972b5f22b7 | 3,773 | //! A collection of functions that are useful for unit testing your html! views.
use crate::VirtualNode;
impl VirtualNode {
/// Get a vector of all of the VirtualNode children / grandchildren / etc of
/// your virtual_node that have a label that matches your filter.
///
/// # Examples
///
/// ```rust,ignore
/// # #[macro_use] extern crate virtual_dom_rs; fn main() {
///
/// let component = html! {<div>
/// <span label="hello",> {"Hi!"} </span>
/// <em label="world",> {"There!!"} </em>
/// <em label="hello",></em>
/// </div> };
///
/// let hello_nodes = component.filter_label(|label| {
/// label.contains("hello")
/// });
///
/// assert_eq!(hello_nodes.len(), 2);
/// }
/// ```
pub fn filter_label<'a, F>(&'a self, filter: F) -> Vec<&'a VirtualNode>
where
F: Fn(&str) -> bool,
{
let mut descendants = vec![];
for child in self.children.as_ref().unwrap() {
get_descendants(&mut descendants, &child);
}
let mut filtered_descendants = vec![];
for node in descendants.into_iter() {
match node.props.get("label") {
Some(label) => {
if filter(label) {
filtered_descendants.push(node);
}
}
None => {}
};
}
filtered_descendants
}
/// Get a vector of all of the descendants of this VirtualNode
/// that have the provided `filter`.
///
/// # Examples
///
/// ```rust,ignore
/// # #[macro_use] extern crate virtual_dom_rs; fn main() {
///
/// let component = html! {<div>
/// <span label="hello",> {"Hi!"} </span>
/// <em label="world",> {"There!!"} </em>
/// <em label="hello",></em>
/// </div> };
///
/// let hello_nodes = component.filter_label_equals("hello");
///
/// assert_eq!(hello_nodes.len(), 2);
/// }
/// ```
pub fn filter_label_equals<'a>(&'a self, label: &str) -> Vec<&'a VirtualNode> {
self.filter_label(|node_label| node_label == label)
}
}
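/// Pushes `node` and all of its descendants into `descendants` in depth-first order.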
fn get_descendants<'a>(descendants: &mut Vec<&'a VirtualNode>, node: &'a VirtualNode) {
descendants.push(node);
for child in node.children.as_ref().unwrap() {
get_descendants(descendants, child);
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
// TODO: Move this test somewhere that we can use the `html!` macro
// #[test]
// fn filter_label() {
// let html = html! {
// // Should not pick up labels on the root node
// <div label="hello0",>
// // This node gets picked up
// <span label="hello1",>
// </span>
// // This node gets picked up
// <em label="hello2",>
// { "hello there :)!" }
// </em>
// <div label="world",></div>
// </div>
// };
//
// let hello_nodes = html.filter_label(|label| label.contains("hello"));
//
// assert_eq!(
// hello_nodes.len(),
// 2,
// "2 elements with label containing 'hello'"
// );
// }
#[test]
fn label_equals() {
let span = VirtualNode::new("span");
let mut props = HashMap::new();
props.insert("label".to_string(), "hello".to_string());
let mut em = VirtualNode::new("em");
em.props = props;
let mut html = VirtualNode::new("div");
html.children.as_mut().unwrap().push(span);
html.children.as_mut().unwrap().push(em);
let hello_nodes = html.filter_label_equals("hello");
assert_eq!(hello_nodes.len(), 1);
}
}
| 28.156716 | 87 | 0.508879 |
bfa2574dd69536cec00010107a970d62280b231d | 14,793 | //! Line rendering.
use core::convert::Infallible;
use crate::{
alignment::{HorizontalTextAlignment, VerticalTextAlignment},
parser::{Parser, Token},
rendering::{cursor::LineCursor, line_iter::LineElementParser},
style::{color::Rgb, height_mode::HeightMode, TextBoxStyle},
utils::str_width,
};
use embedded_graphics::{
draw_target::DrawTarget,
geometry::Point,
text::{
renderer::{CharacterStyle, TextRenderer},
Baseline,
},
Drawable,
};
#[cfg(feature = "ansi")]
use super::ansi::Sgr;
use super::{line_iter::ElementHandler, space_config::UniformSpaceConfig};
/// Render a single line of styled text.
#[derive(Debug)]
pub struct StyledLineRenderer<'a, S, A, V, H>
where
S: Clone,
{
cursor: LineCursor,
state: LineRenderState<'a, S, A, V, H>,
}
#[derive(Debug, Clone)]
pub struct LineRenderState<'a, S, A, V, H>
where
S: Clone,
{
pub parser: Parser<'a>,
pub character_style: S,
pub style: TextBoxStyle<A, V, H>,
pub carried_token: Option<Token<'a>>,
}
impl<S, A, V, H> LineRenderState<'_, S, A, V, H>
where
S: Clone,
{
pub fn is_finished(&self) -> bool {
self.carried_token.is_none() && self.parser.is_empty()
}
}
impl<'a, F, A, V, H> StyledLineRenderer<'a, F, A, V, H>
where
F: TextRenderer<Color = <F as CharacterStyle>::Color> + CharacterStyle,
<F as CharacterStyle>::Color: From<Rgb>,
H: HeightMode,
{
/// Creates a new line renderer.
#[inline]
pub fn new(cursor: LineCursor, state: LineRenderState<'a, F, A, V, H>) -> Self {
Self { cursor, state }
}
}
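/// An `ElementHandler` that draws each parsed element to the display, advancing
/// the draw position and applying style changes as it goes.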
struct RenderElementHandler<'a, F, D> {
style: &'a mut F,
display: &'a mut D,
pos: Point,
}
impl<'a, F, D> ElementHandler for RenderElementHandler<'a, F, D>
where
F: CharacterStyle + TextRenderer,
<F as CharacterStyle>::Color: From<Rgb>,
D: DrawTarget<Color = <F as TextRenderer>::Color>,
{
type Error = D::Error;
fn measure(&self, st: &str) -> u32 {
str_width(self.style, st)
}
fn whitespace(&mut self, width: u32) -> Result<(), Self::Error> {
self.pos = self
.style
.draw_whitespace(width, self.pos, Baseline::Top, self.display)?;
Ok(())
}
fn printed_characters(&mut self, st: &str, _: u32) -> Result<(), Self::Error> {
self.pos = self
.style
.draw_string(st, self.pos, Baseline::Top, self.display)?;
Ok(())
}
fn move_cursor(&mut self, by: i32) -> Result<(), Self::Error> {
        // LineElementParser ensures this new position is valid.
self.pos = Point::new(self.pos.x + by, self.pos.y);
Ok(())
}
#[cfg(feature = "ansi")]
fn sgr(&mut self, sgr: Sgr) -> Result<(), Self::Error> {
sgr.apply(self.style);
Ok(())
}
}
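/// An `ElementHandler` that only applies style changes without drawing, used
/// when the line lies outside the visible area.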
struct StyleOnlyRenderElementHandler<'a, F> {
style: &'a mut F,
}
impl<'a, F> ElementHandler for StyleOnlyRenderElementHandler<'a, F>
where
F: CharacterStyle + TextRenderer,
<F as CharacterStyle>::Color: From<Rgb>,
{
type Error = Infallible;
fn measure(&self, st: &str) -> u32 {
str_width(self.style, st)
}
#[cfg(feature = "ansi")]
fn sgr(&mut self, sgr: Sgr) -> Result<(), Self::Error> {
sgr.apply(self.style);
Ok(())
}
}
impl<'a, F, A, V, H> Drawable for StyledLineRenderer<'a, F, A, V, H>
where
F: TextRenderer<Color = <F as CharacterStyle>::Color> + CharacterStyle,
<F as CharacterStyle>::Color: From<Rgb>,
A: HorizontalTextAlignment,
V: VerticalTextAlignment,
H: HeightMode,
{
type Color = <F as CharacterStyle>::Color;
type Output = LineRenderState<'a, F, A, V, H>;
#[inline]
fn draw<D>(&self, display: &mut D) -> Result<Self::Output, D::Error>
where
D: DrawTarget<Color = Self::Color>,
{
let LineRenderState {
mut parser,
mut character_style,
style,
carried_token,
} = self.state.clone();
let carried = if display.bounding_box().size.height == 0 {
// We're outside of the view - no need for a separate measure pass.
let mut elements = LineElementParser::<'_, '_, _, A>::new(
&mut parser,
self.cursor.clone(),
UniformSpaceConfig::new(&character_style),
carried_token,
);
elements
.process(&mut StyleOnlyRenderElementHandler {
style: &mut character_style,
})
.unwrap()
} else {
            // Measure the line on a cloned parser first so we know exactly which
            // substring will be rendered, then lay that substring out and draw it.
let mut cloned_parser = parser.clone();
let lm = style.measure_line(
&character_style,
&mut cloned_parser,
&mut carried_token.clone(),
self.cursor.line_width(),
);
let consumed_bytes = parser.as_str().len() - cloned_parser.as_str().len();
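            // SAFETY: `consumed_bytes` is the number of bytes consumed from the same
            // backing string, so it always falls on a `char` boundary of `parser.as_str()`.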
let line_str = unsafe { parser.as_str().get_unchecked(..consumed_bytes) };
let (left, space_config) = A::place_line(line_str, &character_style, lm);
let mut cursor = self.cursor.clone();
cursor.move_cursor(left as i32).ok();
let pos = cursor.pos();
let mut elements = LineElementParser::<'_, '_, _, A>::new(
&mut parser,
cursor,
space_config,
carried_token,
);
elements.process(&mut RenderElementHandler {
style: &mut character_style,
display,
pos,
})?
};
Ok(LineRenderState {
parser,
character_style,
style,
carried_token: carried,
})
}
}
#[cfg(feature = "ansi")]
impl Sgr {
fn apply<F>(self, renderer: &mut F)
where
F: CharacterStyle,
<F as CharacterStyle>::Color: From<Rgb>,
{
use embedded_graphics::text::DecorationColor;
match self {
Sgr::Reset => {
renderer.set_text_color(None);
renderer.set_background_color(None);
renderer.set_underline_color(DecorationColor::None);
renderer.set_strikethrough_color(DecorationColor::None);
}
Sgr::ChangeTextColor(color) => {
renderer.set_text_color(Some(color.into()));
}
Sgr::DefaultTextColor => {
renderer.set_text_color(None);
}
Sgr::ChangeBackgroundColor(color) => {
renderer.set_background_color(Some(color.into()));
}
Sgr::DefaultBackgroundColor => {
renderer.set_background_color(None);
}
Sgr::Underline => {
renderer.set_underline_color(DecorationColor::TextColor);
}
Sgr::UnderlineOff => {
renderer.set_underline_color(DecorationColor::None);
}
Sgr::CrossedOut => {
renderer.set_strikethrough_color(DecorationColor::TextColor);
}
Sgr::NotCrossedOut => {
renderer.set_strikethrough_color(DecorationColor::None);
}
}
}
}
#[cfg(test)]
mod test {
use crate::{
alignment::{HorizontalTextAlignment, VerticalTextAlignment},
parser::Parser,
rendering::{
cursor::LineCursor,
line::{LineRenderState, StyledLineRenderer},
},
style::{color::Rgb, height_mode::HeightMode, TabSize, TextBoxStyle, TextBoxStyleBuilder},
utils::test::size_for,
};
use embedded_graphics::{
geometry::Point,
mock_display::MockDisplay,
mono_font::{ascii::FONT_6X9, MonoTextStyleBuilder},
pixelcolor::BinaryColor,
primitives::Rectangle,
text::renderer::{CharacterStyle, TextRenderer},
Drawable,
};
fn test_rendered_text<'a, S, A, V, H>(
text: &'a str,
bounds: Rectangle,
character_style: S,
style: TextBoxStyle<A, V, H>,
pattern: &[&str],
) where
S: TextRenderer<Color = <S as CharacterStyle>::Color> + CharacterStyle,
<S as CharacterStyle>::Color: From<Rgb> + embedded_graphics::mock_display::ColorMapping,
A: HorizontalTextAlignment,
V: VerticalTextAlignment,
H: HeightMode,
{
let parser = Parser::parse(text);
let cursor = LineCursor::new(
bounds.size.width,
TabSize::Spaces(4).into_pixels(&character_style),
);
let state = LineRenderState {
parser,
character_style,
style,
carried_token: None,
};
let renderer = StyledLineRenderer::new(cursor, state);
let mut display = MockDisplay::new();
display.set_allow_overdraw(true);
renderer.draw(&mut display).unwrap();
display.assert_pattern(pattern);
}
#[test]
fn simple_render() {
let character_style = MonoTextStyleBuilder::new()
.font(&FONT_6X9)
.text_color(BinaryColor::On)
.background_color(BinaryColor::Off)
.build();
let style = TextBoxStyleBuilder::new().build();
test_rendered_text(
"Some sample text",
Rectangle::new(Point::zero(), size_for(&FONT_6X9, 7, 1)),
character_style,
style,
&[
"........................",
"..##....................",
".#..#...................",
"..#.....##..##.#....##..",
"...#...#..#.#.#.#..#.##.",
".#..#..#..#.#.#.#..##...",
"..##....##..#...#...###.",
"........................",
"........................",
],
);
}
#[test]
fn simple_render_nbsp() {
let character_style = MonoTextStyleBuilder::new()
.font(&FONT_6X9)
.text_color(BinaryColor::On)
.background_color(BinaryColor::Off)
.build();
let style = TextBoxStyleBuilder::new().build();
test_rendered_text(
"Some\u{A0}sample text",
Rectangle::new(Point::zero(), size_for(&FONT_6X9, 7, 1)),
character_style,
style,
&[
"..........................................",
"..##......................................",
".#..#.....................................",
"..#.....##..##.#....##..........###...###.",
"...#...#..#.#.#.#..#.##........##....#..#.",
".#..#..#..#.#.#.#..##............##..#..#.",
"..##....##..#...#...###........###....###.",
"..........................................",
"..........................................",
],
);
}
#[test]
fn simple_render_first_word_not_wrapped() {
let character_style = MonoTextStyleBuilder::new()
.font(&FONT_6X9)
.text_color(BinaryColor::On)
.background_color(BinaryColor::Off)
.build();
let style = TextBoxStyleBuilder::new().build();
test_rendered_text(
"Some sample text",
Rectangle::new(Point::zero(), size_for(&FONT_6X9, 2, 1)),
character_style,
style,
&[
"............",
"..##........",
".#..#.......",
"..#.....##..",
"...#...#..#.",
".#..#..#..#.",
"..##....##..",
"............",
"............",
],
);
}
#[test]
fn newline_stops_render() {
let character_style = MonoTextStyleBuilder::new()
.font(&FONT_6X9)
.text_color(BinaryColor::On)
.background_color(BinaryColor::Off)
.build();
let style = TextBoxStyleBuilder::new().build();
test_rendered_text(
"Some \nsample text",
Rectangle::new(Point::zero(), size_for(&FONT_6X9, 7, 1)),
character_style,
style,
&[
"........................",
"..##....................",
".#..#...................",
"..#.....##..##.#....##..",
"...#...#..#.#.#.#..#.##.",
".#..#..#..#.#.#.#..##...",
"..##....##..#...#...###.",
"........................",
"........................",
],
);
}
}
#[cfg(all(test, feature = "ansi"))]
mod ansi_parser_tests {
use crate::{
parser::Parser,
rendering::{
cursor::LineCursor,
line::{LineRenderState, StyledLineRenderer},
},
style::{TabSize, TextBoxStyleBuilder},
utils::test::size_for,
};
use embedded_graphics::{
mock_display::MockDisplay,
mono_font::{ascii::FONT_6X9, MonoTextStyleBuilder},
pixelcolor::BinaryColor,
Drawable,
};
#[test]
fn ansi_cursor_backwards() {
let mut display = MockDisplay::new();
display.set_allow_overdraw(true);
let parser = Parser::parse("foo\x1b[2Dsample");
let character_style = MonoTextStyleBuilder::new()
.font(&FONT_6X9)
.text_color(BinaryColor::On)
.background_color(BinaryColor::Off)
.build();
let style = TextBoxStyleBuilder::new().build();
let cursor = LineCursor::new(
size_for(&FONT_6X9, 7, 1).width,
TabSize::Spaces(4).into_pixels(&character_style),
);
let state = LineRenderState {
parser,
character_style,
style,
carried_token: None,
};
StyledLineRenderer::new(cursor, state)
.draw(&mut display)
.unwrap();
display.assert_pattern(&[
"..........................................",
"...#...........................##.........",
"..#.#...........................#.........",
"..#.....###...###.##.#...###....#.....##..",
".###...##....#..#.#.#.#..#..#...#....#.##.",
"..#......##..#..#.#.#.#..#..#...#....##...",
"..#....###....###.#...#..###...###....###.",
".........................#................",
".........................#................",
]);
}
}
| 30.12831 | 99 | 0.469884 |
2170a0abb619b373c4aad39b242eb5fd27416298 | 2,355 | use crate::{AttributeList, Diff, List, PatchAttributeList, PatchCommon};
use sulafat_macros::{Clone, PartialEq, Serialize};
#[derive(Default, Clone, Debug, PartialEq, Serialize)]
pub struct Common<Msg> {
#[serde(skip)]
pub(crate) key: Option<String>,
pub(crate) attribute_list: AttributeList<Msg>,
pub(crate) children: List<Msg>,
}
impl<Msg> Common<Msg> {
pub fn new(
key: Option<String>,
attribute_list: AttributeList<Msg>,
children: List<Msg>,
) -> Self {
Self {
key,
attribute_list,
children,
}
}
}
impl<Msg> Diff for Common<Msg> {
type Patch = PatchCommon;
fn diff(&self, other: &mut Self) -> Option<Self::Patch> {
match (
self.attribute_list.diff(&mut other.attribute_list),
self.children.diff(&mut other.children),
) {
(None, None) => None,
(attribute_list, children) => Some(PatchCommon {
                attribute_list: attribute_list.unwrap_or_default(),
children,
}),
}
}
}
impl<Msg> Eq for Common<Msg> {}
#[cfg(test)]
mod test {
use crate::{
element::rendered::RenderedCommon, id, Apply, Common, Diff, PatchAttributeListOp,
PatchCommon, RenderedAttribute,
};
#[test]
fn same() {
let common1 = Common::<()>::default();
let mut common2 = Common::default();
assert_eq!(common1.diff(&mut common2), None)
}
#[test]
fn different_id() {
let common1 = Common::<()>::new(None, vec![id("a".into())].into(), Default::default());
let mut common2 = Common::new(None, vec![id("b".into())].into(), Default::default());
let patch = common1.diff(&mut common2);
assert_eq!(
patch,
Some(PatchCommon {
attribute_list: vec![PatchAttributeListOp::Insert(RenderedAttribute::Id(
"b".into()
))]
.into(),
children: Default::default()
})
);
let mut rendered_common1 = RenderedCommon::from(&common1);
let rendered_common2 = RenderedCommon::from(&common2);
rendered_common1.apply(patch.unwrap()).unwrap();
assert_eq!(rendered_common1, rendered_common2);
}
}
| 30.192308 | 96 | 0.563057 |