Columns: file_name (string, 3–137 chars), prefix (string, 0–918k chars), suffix (string, 0–962k chars), middle (string, 0–812k chars)
main.rs
/* MIT License Copyright (c) 2021 Philipp Schuster Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #![no_std] #![no_main] core::arch::global_asm!(include_str!("start.S")); mod hedron; use crate::hedron::capability::CrdPortIO; use crate::hedron::pd_ctrl::{pd_ctrl_delegate, DelegateFlags}; use crate::hedron::ROOTTASK_CAPSEL; use core::fmt::Write; use core::panic::PanicInfo; use uart_16550::SerialPort; /// Default standard I/O port of the serial device on (legacy) x86 platforms. /// On legacy BIOS systems the actual port can be found in the Bios Data Area (BDA). /// On modern systems the serial port is usually provided by a PCI card. /// See https://tldp.org/HOWTO/Serial-HOWTO-8.html /// /// 0x3f8 definitely works in QEMU. const SERIAL_PORT: u16 = 0x3f8; /// Set's itself the permissions in the port I/O bitmap via Hedron syscall /// and outputs something to serial. #[no_mangle] fn
(hip_ptr: *const u8, utcb_ptr: *const u8) -> ! { // demonstration that vector instructions and vector registers work too // (no #GPF or so) let a = [1.1, 2.2, 3.3, 4.4]; let b = [-1.55, 22.2, 63.3, -64.4]; let mut c = [0.0; 4]; for i in 0..4 { c[i] = a[i] * b[i]; } // ------------------------------------- let mut serial = enable_serial_device(); writeln!( serial, "Hello World from Roottask: hip_ptr={:?}, utcb_ptr={:?}", hip_ptr, utcb_ptr ) .unwrap(); writeln!(serial, "a[{:?}] * b[{:?}] = c[{:?}]", a, b, c).unwrap(); panic!("game over") } /// Performs a `PD_CTRL_DELEGATE` syscall. The roottask maps the serial-port /// permissions to itself. It needs port 0x3f8 plus the seven ports after it. /// /// Returns the port object from [`uart_16550`]. fn enable_serial_device() -> SerialPort { pd_ctrl_delegate( ROOTTASK_CAPSEL, ROOTTASK_CAPSEL, // order 3: means 2^3 == 8 => map 8 ports at once => optimization of NOVA/Hedron syscall interface CrdPortIO::new(SERIAL_PORT, 3), CrdPortIO::new(SERIAL_PORT, 3), // most important boolean flag: "use hypervisor as src" DelegateFlags::new(true, false, false, true, 0), ) .unwrap(); // initialize the driver of the serial device behind the I/O port unsafe { uart_16550::SerialPort::new(SERIAL_PORT) } } // required by the Rust compiler. #[panic_handler] fn panic_handler(_info: &PanicInfo) -> ! { loop {} }
rust_entry
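A quick illustration of the port arithmetic behind `CrdPortIO::new(SERIAL_PORT, 3)` in this row: order 3 denotes a naturally aligned block of 2^3 = 8 I/O ports, so with SERIAL_PORT = 0x3f8 the delegation covers 0x3f8 through 0x3ff, the full COM1 register block. The sketch below is not part of the dataset row; `ports_covered` is a made-up helper used only for illustration.

fn ports_covered(base: u16, order: u8) -> core::ops::RangeInclusive<u16> {
    // 2^order ports starting at the base selector
    let count = 1u16 << order;
    base..=base + count - 1
}

fn main() {
    // order 3 => 8 ports: 0x3f8..=0x3ff
    assert_eq!(ports_covered(0x3f8, 3), 0x3f8..=0x3ff);
}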
016.py
""" Project Euler - Problem Solution 016 Copyright (c) Justin McGettigan. All rights reserved. https://github.com/jwmcgettigan/project-euler-solutions """ def sum_digits(n): s = 0 while n: s, n = s + n % 10, n // 10 return s def power_digit_sum(power): return sum_digits(2**power) if __name__ == "__main__":
print(power_digit_sum(1000))
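As a sanity check on the digit-sum helper in this row, the worked example from the Problem 16 statement: 2^15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26. A minimal sketch, re-declaring sum_digits so it runs on its own:

def sum_digits(n):
    s = 0
    while n:
        s, n = s + n % 10, n // 10
    return s

# 2**15 = 32768 -> 3 + 2 + 7 + 6 + 8 = 26
assert sum_digits(2**15) == 26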
http.go
package utils import ( "fmt" "net/http" "time" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/auth" "github.com/docker/notary/tuf/signed" "github.com/gorilla/mux" "golang.org/x/net/context" ) // ContextHandler defines an alterate HTTP handler interface which takes in // a context for authorization and returns an HTTP application error. type ContextHandler func(ctx context.Context, w http.ResponseWriter, r *http.Request) error // rootHandler is an implementation of an HTTP request handler which handles // authorization and calling out to the defined alternate http handler. type rootHandler struct { handler ContextHandler auth auth.AccessController actions []string context context.Context trust signed.CryptoService //cachePool redis.Pool } // RootHandlerFactory creates a new rootHandler factory using the given // Context creator and authorizer. The returned factory allows creating // new rootHandlers from the alternate http handler contextHandler and // a scope. func RootHandlerFactory(auth auth.AccessController, ctx context.Context, trust signed.CryptoService) func(ContextHandler, ...string) *rootHandler { return func(handler ContextHandler, actions ...string) *rootHandler { return &rootHandler{ handler: handler, auth: auth, actions: actions, context: ctx, trust: trust, } } } // ServeHTTP serves an HTTP request and implements the http.Handler interface. func (root *rootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) ctx := ctxu.WithRequest(root.context, r) log := ctxu.GetRequestLogger(ctx) ctx, w = ctxu.WithResponseWriter(ctx, w) ctx = ctxu.WithLogger(ctx, log) ctx = context.WithValue(ctx, "repo", vars["imageName"]) ctx = context.WithValue(ctx, "cryptoService", root.trust) defer func() { ctxu.GetResponseLogger(ctx).Info("response completed") }() if root.auth != nil { access := buildAccessRecords(vars["imageName"], root.actions...) var authCtx context.Context var err error if authCtx, err = root.auth.Authorized(ctx, access...); err != nil { if challenge, ok := err.(auth.Challenge); ok { // Let the challenge write the response. 
challenge.SetHeaders(w) if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(access)); err != nil { log.Errorf("failed to serve challenge response: %s", err.Error()) } return } errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized) return } ctx = authCtx } if err := root.handler(ctx, w, r); err != nil { if httpErr, ok := err.(errcode.ErrorCoder); ok { // info level logging for non-5XX http errors httpErrCode := httpErr.ErrorCode().Descriptor().HTTPStatusCode if httpErrCode >= http.StatusInternalServerError { // error level logging for 5XX http errors log.Error(httpErr) } else { log.Info(httpErr) } } e := errcode.ServeJSON(w, err) if e != nil { log.Error(e) } return } } func buildAccessRecords(repo string, actions ...string) []auth.Access { requiredAccess := make([]auth.Access, 0, len(actions)) for _, action := range actions { requiredAccess = append(requiredAccess, auth.Access{ Resource: auth.Resource{ Type: "repository", Name: repo, }, Action: action, }) } return requiredAccess } // CacheControlConfig is an interface for something that knows how to set cache // control headers type CacheControlConfig interface { // SetHeaders will actually set the cache control headers on a Headers object SetHeaders(headers http.Header) } // NewCacheControlConfig returns CacheControlConfig interface for either setting // cache control or disabling cache control entirely func
(maxAgeInSeconds int, mustRevalidate bool) CacheControlConfig { if maxAgeInSeconds > 0 { return PublicCacheControl{MustReValidate: mustRevalidate, MaxAgeInSeconds: maxAgeInSeconds} } return NoCacheControl{} } // PublicCacheControl is a set of options that we will set to enable cache control type PublicCacheControl struct { MustReValidate bool MaxAgeInSeconds int } // SetHeaders sets the public headers with an optional must-revalidate header func (p PublicCacheControl) SetHeaders(headers http.Header) { cacheControlValue := fmt.Sprintf("public, max-age=%v, s-maxage=%v", p.MaxAgeInSeconds, p.MaxAgeInSeconds) if p.MustReValidate { cacheControlValue = fmt.Sprintf("%s, must-revalidate", cacheControlValue) } headers.Set("Cache-Control", cacheControlValue) // delete the Pragma directive, because the only valid value in HTTP is // "no-cache" headers.Del("Pragma") if headers.Get("Last-Modified") == "" { SetLastModifiedHeader(headers, time.Time{}) } } // NoCacheControl is an object which represents a directive to cache nothing type NoCacheControl struct{} // SetHeaders sets the public headers cache-control headers and pragma to no-cache func (n NoCacheControl) SetHeaders(headers http.Header) { headers.Set("Cache-Control", "max-age=0, no-cache, no-store") headers.Set("Pragma", "no-cache") } // cacheControlResponseWriter wraps an existing response writer, and if Write is // called, will try to set the cache control headers if it can type cacheControlResponseWriter struct { http.ResponseWriter config CacheControlConfig statusCode int } // WriteHeader stores the header before writing it, so we can tell if it's been set // to a non-200 status code func (c *cacheControlResponseWriter) WriteHeader(statusCode int) { c.statusCode = statusCode c.ResponseWriter.WriteHeader(statusCode) } // Write will set the cache headers if they haven't already been set and if the status // code has either not been set or set to 200 func (c *cacheControlResponseWriter) Write(data []byte) (int, error) { if c.statusCode == http.StatusOK || c.statusCode == 0 { headers := c.ResponseWriter.Header() if headers.Get("Cache-Control") == "" { c.config.SetHeaders(headers) } } return c.ResponseWriter.Write(data) } type cacheControlHandler struct { http.Handler config CacheControlConfig } func (c cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { c.Handler.ServeHTTP(&cacheControlResponseWriter{ResponseWriter: w, config: c.config}, r) } // WrapWithCacheHandler wraps another handler in one that can add cache control headers // given a 200 response func WrapWithCacheHandler(ccc CacheControlConfig, handler http.Handler) http.Handler { if ccc != nil { return cacheControlHandler{Handler: handler, config: ccc} } return handler } // SetLastModifiedHeader takes a time and uses it to set the LastModified header using // the right date format func SetLastModifiedHeader(headers http.Header, lmt time.Time) { headers.Set("Last-Modified", lmt.Format(time.RFC1123)) }
NewCacheControlConfig
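For the cache-control half of this row, a standalone sketch of the header value that PublicCacheControl.SetHeaders builds for a one-hour max age with must-revalidate. It is re-implemented here with the standard library only so it runs without the surrounding package; the one-hour value is a made-up example.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	maxAge := 3600 // hypothetical one-hour cache lifetime
	h := http.Header{}
	h.Set("Cache-Control", fmt.Sprintf("public, max-age=%v, s-maxage=%v, must-revalidate", maxAge, maxAge))
	h.Del("Pragma") // only "no-cache" is a valid Pragma value, so drop it
	fmt.Println(h.Get("Cache-Control"))
	// prints: public, max-age=3600, s-maxage=3600, must-revalidate
}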
stddev.rs
use super::variance::compute_variance as variance; use crate::math::utils::run_with_function; use nu_protocol::ast::Call; use nu_protocol::engine::{Command, EngineState, Stack}; use nu_protocol::{Category, Example, PipelineData, ShellError, Signature, Span, Value}; #[derive(Clone)] pub struct SubCommand; impl Command for SubCommand { fn name(&self) -> &str { "math stddev" } fn signature(&self) -> Signature { Signature::build("math stddev") .switch("sample", "calculate sample standard deviation", Some('s')) .category(Category::Math) } fn usage(&self) -> &str { "Finds the stddev of a list of numbers or tables" } fn run( &self, _engine_state: &EngineState, _stack: &mut Stack, call: &Call, input: PipelineData, ) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> { let sample = call.has_flag("sample"); run_with_function(call, input, compute_stddev(sample)) } fn examples(&self) -> Vec<Example> { vec![ Example { description: "Get the stddev of a list of numbers", example: "[1 2 3 4 5] | math stddev", result: Some(Value::Float { val: std::f64::consts::SQRT_2, span: Span::unknown(), }), }, Example { description: "Get the sample stddev of a list of numbers", example: "[1 2 3 4 5] | math stddev -s", result: Some(Value::Float { val: 1.5811388300841898, span: Span::unknown(), }), }, ] } } pub fn
(sample: bool) -> impl Fn(&[Value], &Span) -> Result<Value, ShellError> { move |values: &[Value], span: &Span| { let variance = variance(sample)(values, span); match variance { Ok(Value::Float { val, span }) => Ok(Value::Float { val: val.sqrt(), span }), Ok(Value::Int { val, span }) => Ok(Value::Float { val: (val as f64).sqrt(), span }), Err(ShellError::UnsupportedInput(_, err_span)) => Err(ShellError::UnsupportedInput( "Attempted to compute the standard deviation with an item that cannot be used for that.".to_string(), err_span, )), other => other } } } #[cfg(test)] mod test { use super::*; #[test] fn test_examples() { use crate::test_examples; test_examples(SubCommand {}) } }
compute_stddev
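The arithmetic behind the two examples in this row: for [1 2 3 4 5] the mean is 3 and the squared deviations sum to 10, so the population standard deviation is sqrt(10/5) = sqrt(2) and the sample standard deviation is sqrt(10/4) ≈ 1.5811388300841898. A small standalone check:

fn main() {
    let sum_sq_dev = 10.0_f64; // (1-3)^2 + (2-3)^2 + (3-3)^2 + (4-3)^2 + (5-3)^2
    let population = (sum_sq_dev / 5.0).sqrt(); // divide by n
    let sample = (sum_sq_dev / 4.0).sqrt(); // divide by n - 1
    assert!((population - std::f64::consts::SQRT_2).abs() < 1e-12);
    assert!((sample - 1.5811388300841898_f64).abs() < 1e-12);
}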
wenshu_spider.py
from scrapy import Spider import requests import redis class WenshuSpider(Spider):
name = 'wenshu' def __init__(self): super().__init__() self.r = redis.Redis(host='47.106.136.136', port=6388, password='qazwsx12!@') self.start_urls = [ '' ] def get_code(self, guid): url = 'http://wenshu.court.gov.cn/ValiCode/GetCode' s = requests.Session() s.headers.update( { 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/66.0.3359.139 Safari/537.36', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' } ) data = { 'guid': guid } res = s.post(url, data=data) return res.text def get_content(self, Param='', Index=1, Page=5, Order='法院层级', Direction='asc', vl5x=None, number=None, guid=None, vjkl5=None): url = 'http://wenshu.court.gov.cn/List/ListContent' headers = ( { 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/66.0.3359.139 Safari/537.36', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' } ) cookies = { 'vjkl5': vjkl5 } data = { 'Param': Param, 'Index': Index, 'Page': Page, 'Order': Order, 'Direction': Direction, 'vl5x': vl5x, 'number': number, 'guid': guid } res = requests.post(url, data=data, headers=headers, cookies=cookies) return res.text def get_vjkl5(self): url = 'http://wenshu.court.gov.cn/list/list/?sorttype=1' headers = ( { 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/66.0.3359.139 Safari/537.36', } ) res = requests.get(url, headers=headers) return res.cookies['vjkl5']
alsa.rs
use crate::{ context::SAMPLE_RATE, device::{Device, FeedCallback, MixContext, NativeSample}, error::SoundError, }; use alsa_sys::*; use std::{ ffi::{CStr, CString}, mem::size_of, os::raw::c_int, }; pub struct AlsaSoundDevice { frame_count: u32, playback_device: *mut snd_pcm_t, callback: Box<FeedCallback>, out_data: Vec<NativeSample>, mix_buffer: Vec<(f32, f32)>, } unsafe impl Send for AlsaSoundDevice {} pub fn err_code_to_string(err_code: c_int) -> String { unsafe { let message = CStr::from_ptr(snd_strerror(err_code) as *const _) .to_bytes() .to_vec(); String::from_utf8(message).unwrap() } } pub fn check(err_code: c_int) -> Result<(), SoundError> { if err_code < 0 { Err(SoundError::FailedToInitializeDevice(err_code_to_string( err_code, ))) } else { Ok(()) } } impl AlsaSoundDevice { pub fn new<F: FnMut(&mut [(f32, f32)]) + Send + 'static>( buffer_len_bytes: u32, callback: F, ) -> Result<Self, SoundError> { unsafe { let name = CString::new("default").unwrap(); // 16-bit stereo is 4 bytes, so frame count is bufferHalfSize / 4 let frame_count = buffer_len_bytes / 4; let mut playback_device = std::ptr::null_mut(); check(snd_pcm_open( &mut playback_device, name.as_ptr() as *const _, SND_PCM_STREAM_PLAYBACK, 0, ))?; let mut hw_params = std::ptr::null_mut(); check(snd_pcm_hw_params_malloc(&mut hw_params))?; check(snd_pcm_hw_params_any(playback_device, hw_params))?;
hw_params, access, ))?; check(snd_pcm_hw_params_set_format( playback_device, hw_params, SND_PCM_FORMAT_S16_LE, ))?; let mut exact_rate = SAMPLE_RATE; check(snd_pcm_hw_params_set_rate_near( playback_device, hw_params, &mut exact_rate, std::ptr::null_mut(), ))?; check(snd_pcm_hw_params_set_channels( playback_device, hw_params, 2, ))?; check(snd_pcm_hw_params_set_period_size( playback_device, hw_params, frame_count as u64, 0, ))?; let mut exact_size = (frame_count * 2) as u64; check(snd_pcm_hw_params_set_buffer_size_near( playback_device, hw_params, &mut exact_size, ))?; check(snd_pcm_hw_params(playback_device, hw_params))?; snd_pcm_hw_params_free(hw_params); let mut sw_params = std::ptr::null_mut(); check(snd_pcm_sw_params_malloc(&mut sw_params))?; check(snd_pcm_sw_params_current(playback_device, sw_params))?; check(snd_pcm_sw_params_set_avail_min( playback_device, sw_params, frame_count.into(), ))?; check(snd_pcm_sw_params_set_start_threshold( playback_device, sw_params, frame_count.into(), ))?; check(snd_pcm_sw_params(playback_device, sw_params))?; check(snd_pcm_prepare(playback_device))?; let samples_per_channel = buffer_len_bytes as usize / size_of::<NativeSample>(); Ok(Self { playback_device, frame_count, callback: Box::new(callback), out_data: vec![Default::default(); samples_per_channel], mix_buffer: vec![(0.0, 0.0); samples_per_channel], }) } } } impl Device for AlsaSoundDevice { fn get_mix_context(&mut self) -> Option<MixContext> { Some(MixContext { mix_buffer: self.mix_buffer.as_mut_slice(), out_data: &mut self.out_data, callback: &mut self.callback, }) } fn run(&mut self) { loop { self.mix(); 'try_loop: for _ in 0..10 { unsafe { let err = snd_pcm_writei( self.playback_device, self.out_data.as_ptr() as *const _, self.frame_count.into(), ) as i32; if err < 0 { // Try to recover from any errors and re-send data. snd_pcm_recover(self.playback_device, err, 1); } else { break 'try_loop; } } } } } } impl Drop for AlsaSoundDevice { fn drop(&mut self) { unsafe { snd_pcm_close(self.playback_device); } } }
let access = SND_PCM_ACCESS_RW_INTERLEAVED; check(snd_pcm_hw_params_set_access( playback_device,
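A quick check of the frame-size arithmetic in AlsaSoundDevice::new in this row: one frame of 16-bit stereo is 2 channels × 2 bytes = 4 bytes, so buffer_len_bytes / 4 frames fit in the buffer. The buffer size below is a made-up example value:

fn main() {
    let buffer_len_bytes: u32 = 8192; // hypothetical buffer size in bytes
    let bytes_per_frame = 2 * core::mem::size_of::<i16>() as u32; // stereo, 16-bit samples
    assert_eq!(bytes_per_frame, 4);
    assert_eq!(buffer_len_bytes / bytes_per_frame, 2048); // frame count
}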
fft_prototype.py
import matplotlib.pyplot as plt from scipy.io import wavfile # get the api from scipy.fftpack import fft from pylab import * def
(filename): # song files are in ogg... we need it to be in wav. fs, data = wavfile.read(filename) # songs have multiple channels, but we only need one channel a = data.T[0] # this is 8-bit track, b is now normalized on [-1,1) #b=[(ele/2**16)*2-1 for ele in a] # create a list of complex number c = fft(a) # only need half of the fft list (because the internet says so) d = len(c)//2 #bam, it is plotted and saved. #plt.plot(abs(c[:(d-1)]),'r') #savefig(filename+'.png',bbox_inches='tight') return c guitar = f("auldlangguitar.wav") violin = f("auldlangviolin.wav") harmon = f("auldlangharmonica.wav") combine= f("combined.wav") cut = combine[:-14] combined2 = guitar + violin plt.plot(np.abs(guitar), 'r') #plt.show() savefig('guitarplot.png',bbox_inches='tight') gc = np.dot(guitar, combined2) vc = np.dot(violin, combined2) hc = np.dot(harmon, combined2) ng = guitar #/ np.linalg.norm(guitar) nv = violin #/ np.linalg.norm(violin) nh = harmon #/ np.linalg.norm(harmon) nc = combined2 #/ np.linalg.norm(cut) a = np.column_stack((ng, nv, nh)) x, res, rank, s = np.linalg.lstsq(a, nc) plt.plot(np.abs(ng * x[0]), 'r') #plt.show() savefig('decompguitarplot.png',bbox_inches='tight') decompGuitar = np.fft.ifft(ng * 1 + nv *1) print("X\n") print(x) print("decomp real") print(np.real(decompGuitar)) test = np.fft.ifft(guitar) decompreal = (decompGuitar) decompreal = decompreal #/ np.min(np.abs(decompreal[np.nonzero(decompreal)])) origfs, origdata = wavfile.read("auldlangguitar.wav") b = np.column_stack((decompGuitar.astype(origdata.dtype), decompGuitar.astype(origdata.dtype))) wavfile.write("decompguitar.wav", origfs, b) np.savetxt("guitar.csv", test.astype(uint8) , delimiter= ",") np.savetxt("combined.csv", combine, delimiter= ",") np.savetxt("channel2.csv", decompreal.astype(uint8), delimiter= ",") print("decomp orig") print(np.min(decompreal[np.nonzero(decompreal)]))
f
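The decomposition in this row fits the combined spectrum as a linear combination of the individual-instrument spectra via np.linalg.lstsq. A minimal sketch of the same idea on synthetic signals; the frequencies and mixing weights here are made up, since the original .wav files are not available:

import numpy as np

t = np.linspace(0, 1, 256, endpoint=False)
ng = np.fft.fft(np.sin(2 * np.pi * 5 * t))    # stands in for the guitar spectrum
nv = np.fft.fft(np.sin(2 * np.pi * 11 * t))   # stands in for the violin spectrum
nc = 2.0 * ng + 0.5 * nv                      # "combined" recording spectrum

a = np.column_stack((ng, nv))
x, *_ = np.linalg.lstsq(a, nc, rcond=None)
print(np.round(x.real, 3))  # ~[2.0, 0.5]: the mixing weights are recovered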
recover.rs
//! A middleware that recovers a resolution after some failures. use futures::stream::TryStream; use futures::{prelude::*, ready, FutureExt, Stream}; use linkerd2_error::{Error, Recover}; use linkerd2_proxy_core::resolve::{self, Update}; use pin_project::pin_project; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; #[derive(Clone, Debug)] pub struct Eos(()); #[derive(Clone, Debug)] pub struct Resolve<E, R> { resolve: R, recover: E, } #[pin_project] pub struct ResolveFuture<T, E: Recover, R: resolve::Resolve<T>> { inner: Option<Inner<T, E, R>>, } #[pin_project(project = ResolutionProj)] pub struct Resolution<T, E: Recover, R: resolve::Resolve<T>> { inner: Inner<T, E, R>, } #[pin_project] struct Inner<T, E: Recover, R: resolve::Resolve<T>> { target: T, resolve: R, recover: E, state: State<R::Future, R::Resolution, E::Backoff>, } #[pin_project] enum State<F, R: TryStream, B> { Disconnected { backoff: Option<B>, }, Connecting { future: F, backoff: Option<B>, }, Connected { #[pin] resolution: R, is_initial: bool, }, Recover { error: Option<Error>, backoff: Option<B>, }, Backoff(Option<B>), } // === impl Resolve === impl<E, R> Resolve<E, R> { pub fn new(recover: E, resolve: R) -> Self
} impl<T, E, R> tower::Service<T> for Resolve<E, R> where T: Clone, R: resolve::Resolve<T> + Clone, R::Resolution: Unpin, R::Future: Unpin, R::Endpoint: Clone + PartialEq, E: Recover + Clone, E::Backoff: Unpin, { type Response = Resolution<T, E, R>; type Error = Error; type Future = ResolveFuture<T, E, R>; #[inline] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.resolve.poll_ready(cx).map_err(Into::into) } #[inline] fn call(&mut self, target: T) -> Self::Future { let future = self.resolve.resolve(target.clone()); Self::Future { inner: Some(Inner { state: State::Connecting { future, backoff: None, }, target: target.clone(), recover: self.recover.clone(), resolve: self.resolve.clone(), }), } } } // === impl ResolveFuture === impl<T, E, R> Future for ResolveFuture<T, E, R> where T: Clone, R: resolve::Resolve<T>, R::Resolution: Unpin, R::Future: Unpin, R::Endpoint: Clone + PartialEq, E: Recover, E::Backoff: Unpin, { type Output = Result<Resolution<T, E, R>, Error>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let this = self.project(); // Wait until the resolution is connected. ready!(this .inner .as_mut() .expect("polled after complete") .poll_connected(cx))?; let inner = this.inner.take().expect("polled after complete"); Poll::Ready(Ok(Resolution { inner })) } } // === impl Resolution === impl<T, E, R> Stream for Resolution<T, E, R> where T: Clone, R: resolve::Resolve<T>, R::Future: Unpin, R::Resolution: Unpin, R::Endpoint: Clone + PartialEq, E: Recover, E::Backoff: Unpin, { type Item = Result<Update<R::Endpoint>, Error>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let mut this = self.project(); loop { match this.inner.state { State::Connected { ref mut resolution, ref mut is_initial, } => match ready!(resolution.try_poll_next_unpin(cx)) { Some(Ok(Update::Remove(_))) if *is_initial => { debug_assert!(false, "Remove must not be initial update"); tracing::debug!("Ignoring Remove after connection"); // Continue polling until a useful update is received. } Some(Ok(update)) => { let update = if *is_initial { *is_initial = false; match update { Update::Add(eps) => Update::Reset(eps), up => up, } } else { update }; return Poll::Ready(Some(Ok(update))); } Some(Err(e)) => { this.inner.state = State::Recover { error: Some(e.into()), backoff: None, } } None => { this.inner.state = State::Recover { error: Some(Eos(()).into()), backoff: None, } } }, // XXX(eliza): note that this match was originally an `if let`, // but that doesn't work with `#[project]` for some kinda reason _ => {} } ready!(this.inner.poll_connected(cx))?; } } } // === impl Inner === impl<T, E, R> Inner<T, E, R> where T: Clone, R: resolve::Resolve<T>, R::Resolution: Unpin, R::Future: Unpin, R::Endpoint: Clone + PartialEq, E: Recover, E::Backoff: Unpin, { /// Drives the state forward until its connected. fn poll_connected(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> { loop { self.state = match self.state { // When disconnected, start connecting. // // If we're recovering from a previous failure, we retain the // backoff in case this connection attempt fails. 
State::Disconnected { ref mut backoff } => { tracing::trace!("connecting"); ready!(self.resolve.poll_ready(cx).map_err(Into::into))?; let future = self.resolve.resolve(self.target.clone()); let backoff = backoff.take(); State::Connecting { future, backoff } } State::Connecting { ref mut future, ref mut backoff, } => match ready!(future.poll_unpin(cx)) { Ok(resolution) => { tracing::trace!("Connected"); State::Connected { resolution, is_initial: true, } } Err(e) => State::Recover { error: Some(e.into()), backoff: backoff.take(), }, }, State::Connected { .. } => return Poll::Ready(Ok(())), // If any stage failed, try to recover. If the error is // recoverable, start (or continue) backing off... State::Recover { ref mut error, ref mut backoff, } => { let err = error.take().expect("illegal state"); tracing::debug!(%err, "recovering"); let new_backoff = self.recover.recover(err)?; State::Backoff(backoff.take().or(Some(new_backoff))) } State::Backoff(ref mut backoff) => { let more = ready!(backoff.as_mut().expect("illegal state").poll_next_unpin(cx)); let backoff = if more.is_some() { backoff.take() } else { None }; tracing::trace!("disconnected"); State::Disconnected { backoff } } }; } } } impl std::fmt::Display for Eos { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "end of stream reached") } } impl std::error::Error for Eos {}
{ Self { resolve, recover } }
bme280.go
package BME280 import "fmt" import "time" import "log" import "golang.org/x/exp/io/i2c" // go get golang.org/x/exp/io/i2c type SensorIf interface { ReadData() (float64, float64, float64, error) } type Connection struct { conn *i2c.Device // settings oversampleHum byte oversampleTemp byte oversamplePres byte mode byte digT1 uint16 digT2 int16 digT3 int16 digH1 uint8 digH2 int16 digH3 uint8 digH4 int32 digH5 int32 digH6 int32 digP1 uint16 digP2 int16 digP3 int16 digP4 int16 digP5 int16 digP6 int16 digP7 int16 digP8 int16 digP9 int16 } // Registers (see datasheet at https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME280_DS001-12.pdf) const ( REG_CHIP_ID = 0xD0 REG_CONTROL_HUM = 0xF2 REG_CONTROL = 0xF4 REG_CALIBRATION_01 = 0x88 REG_CALIBRATION_02 = 0xA1 REG_CALIBRATION_03 = 0xE1 REG_DATA = 0xF7 ) // Initiate connection via I2C func Connect(address uint8, bus int) (*Connection, error) { path := fmt.Sprintf("/dev/i2c-%d", bus) c, err := i2c.Open(&i2c.Devfs{Dev: path}, int(address)) if err != nil { return nil, err } this := &Connection{conn: c} this.oversampleHum = 2 this.oversampleTemp = 2 this.oversamplePres = 2 this.mode = 1 err = this.ReadCalibration() if err != nil { return nil, err } return this, nil } // Close I2C connection func (this *Connection) Disconnect() () { err := this.conn.Close() if err != nil { log.Fatal(err) } } // Read calibration values func (this *Connection) ReadCalibration() (error) { calib01 := make([]byte, 24) calib02 := make([]byte, 1) calib03 := make([]byte, 7) err := this.conn.ReadReg(REG_CALIBRATION_01, calib01)
if err != nil { return err } err = this.conn.ReadReg(REG_CALIBRATION_02, calib02) if err != nil { return err } err = this.conn.ReadReg(REG_CALIBRATION_03, calib03) if err != nil { return err } this.digT1 = uint16(calib01[1]) << 8 | uint16(calib01[0]) this.digT2 = int16(calib01[3]) << 8 | int16(calib01[2]) this.digT3 = int16(calib01[5]) << 8 | int16(calib01[4]) this.digH1 = uint8(calib02[0]) this.digH2 = int16(calib03[1]) << 8 | int16(calib03[0]) this.digH3 = uint8(calib03[2]) this.digH4 = int32(calib03[3]) this.digH4 = (this.digH4 << 24) >> 20 this.digH4 = this.digH4 | (int32(calib03[4]) & 0x0F) this.digH5 = int32(calib03[5]) this.digH5 = (this.digH5 << 24) >> 20 this.digH5 = this.digH5 | (int32(calib03[4]) >> 4 & 0x0F) this.digH6 = int32(calib03[6]) this.digP1 = uint16(calib01[7]) << 8 | uint16(calib01[6]) this.digP2 = int16(calib01[9]) << 8 | int16(calib01[8]) this.digP3 = int16(calib01[1]) << 8 | int16(calib01[10]) this.digP4 = int16(calib01[13]) << 8 | int16(calib01[12]) this.digP5 = int16(calib01[15]) << 8 | int16(calib01[14]) this.digP6 = int16(calib01[17]) << 8 | int16(calib01[16]) this.digP7 = int16(calib01[19]) << 8 | int16(calib01[18]) this.digP8 = int16(calib01[21]) << 8 | int16(calib01[20]) this.digP9 = int16(calib01[23]) << 8 | int16(calib01[22]) return nil } // Read chip ID func (this *Connection) ChipID() (byte, byte, error) { data := []byte{0, 0} err := this.conn.ReadReg(REG_CHIP_ID, data) if err != nil { return 0, 0, err } return data[0], data[1], nil } // Read temperature, humidity, pressure func (this *Connection) ReadData() (float64, float64, float64, error) { // write control hum err := this.conn.WriteReg(REG_CONTROL_HUM, []byte{this.oversampleHum}) if err != nil { return 0.0, 0.0, 0.0, err } // write other control control := []byte {this.oversampleTemp << 5 | this.oversamplePres << 2 | this.mode} err = this.conn.WriteReg(REG_CONTROL, control) if err != nil { return 0.0, 0.0, 0.0, err } // wait for the measurements are done (Datasheet Appendix B: Measurement time and current calculation) waitTime := 1.25 + (2.3 * float64(this.oversampleTemp)) + ((2.3 * float64(this.oversamplePres)) + 0.575) + ((2.3 * float64(this.oversampleHum))+0.575); time.Sleep(time.Duration(waitTime) * time.Millisecond) // read measurements rawData := make([]byte, 8) err = this.conn.ReadReg(REG_DATA, rawData) if err != nil { return 0.0, 0.0, 0.0, err } rawTemp := uint32(rawData[3]) << 12 | uint32(rawData[4]) << 4 | uint32(rawData[5]) >> 4 rawHum := uint32(rawData[6]) << 8 | uint32(rawData[7]) rawPres := uint32(rawData[0]) << 12 | uint32(rawData[1]) << 4 | uint32(rawData[2]) >> 4 // refine temperature value var1 := ((uint32(rawTemp >> 3) - uint32(this.digT1) << 1) * uint32(this.digT2)) >> 11 var2 := uint32(rawTemp >> 4) - uint32(this.digT1) var3 := (((var2 * var2) >> 12) * uint32(this.digT3)) >> 14 vart := var1 + var3 temperature := float64(((vart * 5) + 128) >> 8) // refine pressure varp1 := float64(vart) / 2.0 - 64000.0 varp2 := varp1 * varp1 * float64(this.digP6) / 32768.0 varp2 = varp2 + varp1 * float64(this.digP5) * 2.0 varp2 = varp2 / 4.0 + float64(this.digP4) * 65536.0 varp1 = (float64(this.digP3) * varp1 * varp1 / 524288.0 + float64(this.digP2) * varp1) / 524288.0 varp1 = (1.0 + varp1 / 32768.0) * float64(this.digP1) var pressure float64 if varp1 == 0 { pressure=0 } else { pressure = 1048576.0 - float64(rawPres) pressure = ((pressure - varp2 / 4096.0) * 6250.0) / varp1 varp1 = float64(this.digP9) * pressure * pressure / 2147483648.0 varp2 = pressure * float64(this.digP8) / 32768.0 pressure = 
pressure + (varp1 + varp2 + float64(this.digP7)) / 16.0 // convert to mmhg pressure = pressure / 1.33322387415 } // refine humidity value humidity := float64(vart) - 76800.0 humidity = (float64(rawHum) - (float64(this.digH4) * 64.0 + float64(this.digH5) / 16384.0 * humidity)) * (float64(this.digH2) / 65536.0 * (1.0 + float64(this.digH6) / 67108864.0 * humidity * (1.0 + float64(this.digH3) / 67108864.0 * humidity))) humidity = humidity * (1.0 - float64(this.digH1) * humidity / 524288.0) if humidity > 100.0 { humidity = 100.0 } else if humidity < 0.0 { humidity = 0.0 } return temperature/100.0, humidity, pressure/100.0, nil }
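Worked example of the measurement wait-time formula used in ReadData in this row (datasheet Appendix B), with the oversampling of 2 that Connect configures for temperature, pressure, and humidity:

package main

import "fmt"

func main() {
	osrsT, osrsP, osrsH := 2.0, 2.0, 2.0 // oversampling settings from Connect
	waitMs := 1.25 + 2.3*osrsT + (2.3*osrsP + 0.575) + (2.3*osrsH + 0.575)
	fmt.Printf("wait %.2f ms before reading the data registers\n", waitMs) // 16.20 ms
}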
services.go
package initialize import ( "context" "encoding/json" "errors" "fmt" "net/http" "net/http/cookiejar" "time" "github.com/quay/clair/config" "github.com/quay/claircore/enricher/cvss" "github.com/quay/claircore/libindex" "github.com/quay/claircore/libvuln" "github.com/quay/claircore/libvuln/driver" "github.com/quay/zlog" "golang.org/x/net/publicsuffix" "gopkg.in/square/go-jose.v2/jwt" clairerror "github.com/quay/clair/v4/clair-error" "github.com/quay/clair/v4/httptransport" "github.com/quay/clair/v4/httptransport/client" "github.com/quay/clair/v4/indexer" "github.com/quay/clair/v4/internal/httputil" "github.com/quay/clair/v4/matcher" notifier "github.com/quay/clair/v4/notifier/service" ) const ( // NotifierIssuer is the value used for the issuer claim of any outgoing // HTTP requests the notifier makes, if PSK auth is configured. NotifierIssuer = `clair-notifier` ) var ( intraserviceClaim = jwt.Claims{Issuer: httptransport.IntraserviceIssuer} notifierClaim = jwt.Claims{Issuer: NotifierIssuer} ) // Srv is a bundle of configured Services. // // The members are populated according to the configuration that was passed to // Services. type Srv struct { Indexer indexer.Service Matcher matcher.Service Notifier notifier.Service } // Services configures the services needed for a given mode according to the // provided configuration. func Services(ctx context.Context, cfg *config.Config) (*Srv, error) { ctx = zlog.ContextWithValues(ctx, "component", "initialize/Services") zlog.Info(ctx).Msg("begin service initialization") defer zlog.Info(ctx).Msg("end service initialization") var srv Srv var err error switch cfg.Mode { case config.ComboMode: srv.Indexer, err = localIndexer(ctx, cfg) if err != nil { return nil, err } srv.Matcher, err = localMatcher(ctx, cfg) if err != nil { return nil, err } srv.Notifier, err = localNotifier(ctx, cfg, srv.Indexer, srv.Matcher) if err != nil { return nil, err } case config.IndexerMode: srv.Indexer, err = localIndexer(ctx, cfg) if err != nil { return nil, err } case config.MatcherMode: srv.Matcher, err = localMatcher(ctx, cfg) if err != nil { return nil, err }
if err != nil { return nil, err } case config.NotifierMode: srv.Indexer, err = remoteIndexer(ctx, cfg, cfg.Notifier.IndexerAddr) if err != nil { return nil, err } srv.Matcher, err = remoteMatcher(ctx, cfg, cfg.Notifier.MatcherAddr) if err != nil { return nil, err } srv.Notifier, err = localNotifier(ctx, cfg, srv.Indexer, srv.Matcher) if err != nil { return nil, err } default: return nil, fmt.Errorf("could not determine passed in mode: %v", cfg.Mode) } return &srv, nil } func localIndexer(ctx context.Context, cfg *config.Config) (indexer.Service, error) { const msg = "failed to initialize indexer: " mkErr := func(err error) *clairerror.ErrNotInitialized { return &clairerror.ErrNotInitialized{msg + err.Error()} } opts := libindex.Opts{ ConnString: cfg.Indexer.ConnString, ScanLockRetry: time.Duration(cfg.Indexer.ScanLockRetry) * time.Second, LayerScanConcurrency: cfg.Indexer.LayerScanConcurrency, Migrations: cfg.Indexer.Migrations, Airgap: cfg.Indexer.Airgap, } if cfg.Indexer.Scanner.Package != nil { opts.ScannerConfig.Package = make(map[string]func(interface{}) error, len(cfg.Indexer.Scanner.Package)) for name, node := range cfg.Indexer.Scanner.Package { node := node opts.ScannerConfig.Package[name] = func(v interface{}) error { b, err := json.Marshal(node) if err != nil { return err } return json.Unmarshal(b, v) } } } if cfg.Indexer.Scanner.Dist != nil { opts.ScannerConfig.Dist = make(map[string]func(interface{}) error, len(cfg.Indexer.Scanner.Dist)) for name, node := range cfg.Indexer.Scanner.Dist { node := node opts.ScannerConfig.Dist[name] = func(v interface{}) error { b, err := json.Marshal(node) if err != nil { return err } return json.Unmarshal(b, v) } } } if cfg.Indexer.Scanner.Repo != nil { opts.ScannerConfig.Repo = make(map[string]func(interface{}) error, len(cfg.Indexer.Scanner.Repo)) for name, node := range cfg.Indexer.Scanner.Repo { node := node opts.ScannerConfig.Repo[name] = func(v interface{}) error { b, err := json.Marshal(node) if err != nil { return err } return json.Unmarshal(b, v) } } } tr := http.DefaultTransport.(*http.Transport).Clone() // Use an empty claim because this shouldn't be talking to something that // needs preconfigured authz. Callers should be providing credentials to the // indexing process in the submitted manifest. 
c, _, err := httputil.Client(tr, nil, cfg) if err != nil { return nil, mkErr(err) } s, err := libindex.New(ctx, &opts, c) if err != nil { return nil, mkErr(err) } return s, nil } func remoteIndexer(ctx context.Context, cfg *config.Config, addr string) (indexer.Service, error) { const msg = "failed to initialize indexer client: " mkErr := func(err error) *clairerror.ErrNotInitialized { return &clairerror.ErrNotInitialized{msg + err.Error()} } rc, err := remoteClient(ctx, cfg, intraserviceClaim, addr) if err != nil { return nil, mkErr(err) } return rc, nil } func remoteClient(ctx context.Context, cfg *config.Config, claim jwt.Claims, addr string) (*client.HTTP, error) { tr := http.DefaultTransport.(*http.Transport).Clone() c, auth, err := httputil.Client(tr, &claim, cfg) switch { case err != nil: return nil, err case !auth && cfg.Auth.Any(): return nil, errors.New("client authorization required but not provided") default: // OK } return client.NewHTTP(ctx, client.WithAddr(addr), client.WithClient(c)) } func localMatcher(ctx context.Context, cfg *config.Config) (matcher.Service, error) { const msg = "failed to initialize matcher: " mkErr := func(err error) *clairerror.ErrNotInitialized { return &clairerror.ErrNotInitialized{ Msg: msg + err.Error(), } } tr := http.DefaultTransport.(*http.Transport).Clone() // Some servers return weak validators when the Content-Encoding is not // "identity". Setting this prevents automatically negotiating up to "gzip". tr.DisableCompression = true jar, err := cookiejar.New(&cookiejar.Options{ PublicSuffixList: publicsuffix.List, }) if err != nil { return nil, err } cl := &http.Client{ Jar: jar, Transport: httputil.RateLimiter(tr), } updaterConfigs := make(map[string]driver.ConfigUnmarshaler) for name, node := range cfg.Updaters.Config { node := node updaterConfigs[name] = func(v interface{}) error { b, err := json.Marshal(node) if err != nil { return err } return json.Unmarshal(b, v) } } matcherConfigs := make(map[string]driver.MatcherConfigUnmarshaler) for name, node := range cfg.Matchers.Config { node := node matcherConfigs[name] = func(v interface{}) error { b, err := json.Marshal(node) if err != nil { return err } return json.Unmarshal(b, v) } } s, err := libvuln.New(ctx, &libvuln.Opts{ MaxConnPool: int32(cfg.Matcher.MaxConnPool), ConnString: cfg.Matcher.ConnString, Migrations: cfg.Matcher.Migrations, UpdaterSets: cfg.Updaters.Sets, UpdateInterval: cfg.Matcher.Period, UpdaterConfigs: updaterConfigs, UpdateRetention: cfg.Matcher.UpdateRetention, MatcherNames: cfg.Matchers.Names, MatcherConfigs: matcherConfigs, Client: cl, Enrichers: []driver.Enricher{ &cvss.Enricher{}, }, }) if err != nil { return nil, mkErr(err) } return s, nil } func remoteMatcher(ctx context.Context, cfg *config.Config, addr string) (matcher.Service, error) { const msg = "failed to initialize matcher client: " mkErr := func(err error) *clairerror.ErrNotInitialized { return &clairerror.ErrNotInitialized{msg + err.Error()} } rc, err := remoteClient(ctx, cfg, intraserviceClaim, addr) if err != nil { return nil, mkErr(err) } return rc, nil } func localNotifier(ctx context.Context, cfg *config.Config, i indexer.Service, m matcher.Service) (notifier.Service, error) { const msg = "failed to initialize notifier: " mkErr := func(err error) *clairerror.ErrNotInitialized { return &clairerror.ErrNotInitialized{ Msg: msg + err.Error(), } } tr := http.DefaultTransport.(*http.Transport).Clone() c, _, err := httputil.Client(tr, &notifierClaim, cfg) if err != nil { return nil, mkErr(err) } s, err := 
notifier.New(ctx, notifier.Opts{ DeliveryInterval: cfg.Notifier.DeliveryInterval, ConnString: cfg.Notifier.ConnString, Indexer: i, Matcher: m, Client: c, Migrations: cfg.Notifier.Migrations, PollInterval: cfg.Notifier.PollInterval, DisableSummary: cfg.Notifier.DisableSummary, Webhook: cfg.Notifier.Webhook, AMQP: cfg.Notifier.AMQP, STOMP: cfg.Notifier.STOMP, }) if err != nil { return nil, mkErr(err) } return s, nil }
srv.Indexer, err = remoteIndexer(ctx, cfg, cfg.Matcher.IndexerAddr)
builder.go
// Copyright 2015 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package executor import ( "github.com/juju/errors" "github.com/pingcap/tidb/ast" "github.com/pingcap/tidb/context" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/model" "github.com/pingcap/tidb/parser/opcode" "github.com/pingcap/tidb/plan" "github.com/pingcap/tidb/sessionctx/autocommit" "github.com/pingcap/tidb/sessionctx/variable" ) // executorBuilder builds an Executor from a Plan. // The InfoSchema must be the same one used in InfoBinder. type executorBuilder struct { ctx context.Context is infoschema.InfoSchema err error } func newExecutorBuilder(ctx context.Context, is infoschema.InfoSchema) *executorBuilder { return &executorBuilder{ ctx: ctx, is: is, } } func (b *executorBuilder) build(p plan.Plan) Executor { switch v := p.(type) { case nil: return nil case *plan.CheckTable: return b.buildCheckTable(v) case *plan.DDL: return b.buildDDL(v) case *plan.Deallocate: return b.buildDeallocate(v) case *plan.NewDelete: return b.buildNewDelete(v) case *plan.Distinct: return b.buildDistinct(v) case *plan.Execute: return b.buildExecute(v) case *plan.Explain: return b.buildExplain(v) case *plan.Filter: src := b.build(v.GetChildByIndex(0)) return b.buildFilter(src, v.Conditions) case *plan.Insert: return b.buildInsert(v) case *plan.Limit: return b.buildLimit(v) case *plan.Prepare: return b.buildPrepare(v) case *plan.SelectLock: return b.buildSelectLock(v) case *plan.ShowDDL: return b.buildShowDDL(v) case *plan.Show: return b.buildShow(v) case *plan.Simple: return b.buildSimple(v) case *plan.NewSort: return b.buildNewSort(v) case *plan.NewUnion: return b.buildNewUnion(v) case *plan.NewUpdate: return b.buildNewUpdate(v) case *plan.PhysicalHashJoin: return b.buildJoin(v) case *plan.PhysicalHashSemiJoin: return b.buildSemiJoin(v) case *plan.Selection: return b.buildSelection(v) case *plan.Aggregation: return b.buildAggregation(v) case *plan.Projection: return b.buildProjection(v) case *plan.PhysicalTableScan: return b.buildNewTableScan(v, nil) case *plan.PhysicalIndexScan: return b.buildNewIndexScan(v, nil) case *plan.NewTableDual: return b.buildNewTableDual(v) case *plan.PhysicalApply: return b.buildApply(v) case *plan.Exists: return b.buildExists(v) case *plan.MaxOneRow: return b.buildMaxOneRow(v) case *plan.Trim: return b.buildTrim(v) case *plan.PhysicalDummyScan: return b.buildDummyScan(v) default: b.err = ErrUnknownPlan.Gen("Unknown Plan %T", p) return nil } } func (b *executorBuilder) buildFilter(src Executor, conditions []ast.ExprNode) Executor { if len(conditions) == 0 { return src } return &FilterExec{ Src: src, Condition: b.joinConditions(conditions), ctx: b.ctx, } } func (b *executorBuilder) buildShowDDL(v *plan.ShowDDL) Executor { return &ShowDDLExec{ fields: v.Fields(), ctx: b.ctx, } } func (b *executorBuilder) buildCheckTable(v *plan.CheckTable) Executor { return &CheckTableExec{ tables: v.Tables, ctx: b.ctx, } } func (b *executorBuilder) buildDeallocate(v *plan.Deallocate) Executor { return &DeallocateExec{ ctx: b.ctx, Name: v.Name, } 
} func (b *executorBuilder) joinConditions(conditions []ast.ExprNode) ast.ExprNode { if len(conditions) == 0 { return nil } if len(conditions) == 1 { return conditions[0] } condition := &ast.BinaryOperationExpr{ Op: opcode.AndAnd, L: conditions[0], R: b.joinConditions(conditions[1:]), } ast.MergeChildrenFlags(condition, condition.L, condition.R) return condition } func (b *executorBuilder) buildSelectLock(v *plan.SelectLock) Executor { src := b.build(v.GetChildByIndex(0)) ac, err := autocommit.ShouldAutocommit(b.ctx) if err != nil { b.err = errors.Trace(err) return src } if ac { // Locking of rows for update using SELECT FOR UPDATE only applies when autocommit // is disabled (either by beginning transaction with START TRANSACTION or by setting // autocommit to 0. If autocommit is enabled, the rows matching the specification are not locked. // See https://dev.mysql.com/doc/refman/5.7/en/innodb-locking-reads.html return src } e := &SelectLockExec{ Src: src, Lock: v.Lock, ctx: b.ctx, schema: v.GetSchema(), } return e } func (b *executorBuilder) buildLimit(v *plan.Limit) Executor { src := b.build(v.GetChildByIndex(0)) if x, ok := src.(NewXExecutor); ok { if x.AddLimit(v) && v.Offset == 0 { return src } } e := &LimitExec{ Src: src, Offset: v.Offset, Count: v.Count, schema: v.GetSchema(), } return e } func (b *executorBuilder) buildDistinct(v *plan.Distinct) Executor { return &DistinctExec{Src: b.build(v.GetChildByIndex(0)), schema: v.GetSchema()} } func (b *executorBuilder) buildPrepare(v *plan.Prepare) Executor { return &PrepareExec{ Ctx: b.ctx, IS: b.is, Name: v.Name, SQLText: v.SQLText, } } func (b *executorBuilder) buildExecute(v *plan.Execute) Executor { return &ExecuteExec{ Ctx: b.ctx, IS: b.is, Name: v.Name, UsingVars: v.UsingVars, ID: v.ID, } } func (b *executorBuilder) buildShow(v *plan.Show) Executor { e := &ShowExec{ Tp: v.Tp, DBName: model.NewCIStr(v.DBName), Table: v.Table, Column: v.Column, User: v.User, Flag: v.Flag, Full: v.Full, GlobalScope: v.GlobalScope, ctx: b.ctx, is: b.is, fields: v.Fields(), } if e.Tp == ast.ShowGrants && len(e.User) == 0 { e.User = variable.GetSessionVars(e.ctx).User } return e } func (b *executorBuilder) buildSimple(v *plan.Simple) Executor { switch s := v.Statement.(type) { case *ast.GrantStmt: return b.buildGrant(s) } return &SimpleExec{Statement: v.Statement, ctx: b.ctx} } func (b *executorBuilder) buildInsert(v *plan.Insert) Executor { ivs := &InsertValues{ ctx: b.ctx, Columns: v.Columns, Lists: v.Lists, Setlist: v.Setlist, } if v.SelectPlan != nil { ivs.SelectExec = b.build(v.SelectPlan) } // Get Table ts, ok := v.Table.TableRefs.Left.(*ast.TableSource) if !ok { b.err = errors.New("Can not get table") return nil } tn, ok := ts.Source.(*ast.TableName) if !ok { b.err = errors.New("Can not get table") return nil } tableInfo := tn.TableInfo tbl, ok := b.is.TableByID(tableInfo.ID) if !ok { b.err = errors.Errorf("Can not get table %d", tableInfo.ID) return nil
ivs.Table = tbl if v.IsReplace { return b.buildReplace(ivs) } insert := &InsertExec{ InsertValues: ivs, OnDuplicate: v.OnDuplicate, Priority: v.Priority, Ignore: v.Ignore, } // fields is used to evaluate values expr. insert.fields = ts.GetResultFields() return insert } func (b *executorBuilder) buildReplace(vals *InsertValues) Executor { return &ReplaceExec{ InsertValues: vals, } } func (b *executorBuilder) buildGrant(grant *ast.GrantStmt) Executor { return &GrantExec{ ctx: b.ctx, Privs: grant.Privs, ObjectType: grant.ObjectType, Level: grant.Level, Users: grant.Users, } } func (b *executorBuilder) buildDDL(v *plan.DDL) Executor { return &DDLExec{Statement: v.Statement, ctx: b.ctx, is: b.is} } func (b *executorBuilder) buildExplain(v *plan.Explain) Executor { return &ExplainExec{ StmtPlan: v.StmtPlan, fields: v.Fields(), } } func (b *executorBuilder) buildNewUnionScanExec(src Executor, condition expression.Expression) *UnionScanExec { us := &UnionScanExec{ctx: b.ctx, Src: src} switch x := src.(type) { case *NewXSelectTableExec: us.desc = x.desc us.dirty = getDirtyDB(b.ctx).getDirtyTable(x.table.Meta().ID) us.newCondition = condition us.newBuildAndSortAddedRows(x.table, x.asName) case *NewXSelectIndexExec: us.desc = x.indexPlan.Desc for _, ic := range x.indexPlan.Index.Columns { for i, col := range x.indexPlan.GetSchema() { if col.ColName.L == ic.Name.L { us.usedIndex = append(us.usedIndex, i) break } } } us.dirty = getDirtyDB(b.ctx).getDirtyTable(x.table.Meta().ID) us.newCondition = condition us.newBuildAndSortAddedRows(x.table, x.asName) default: b.err = ErrUnknownPlan.Gen("Unknown Plan %T", src) } return us }
}
utils.go
package core import ( // nolint:gosec "crypto/sha1" "crypto/sha256" "encoding/json" "errors" "fmt" "net/http" "regexp" "strings" "sync/atomic" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3/s3manager" v5 "github.com/retailcrm/api-client-go/v5" v1 "github.com/retailcrm/mg-transport-api-client-go/v1" ) var defaultCurrencies = map[string]string{ "rub": "₽", "uah": "₴", "byn": "Br", "kzt": "₸", "usd": "$", "eur": "€", "prb": "PRB", "mdl": "L", "kgs": "с", "pln": "zł", "azn": "₼", "amd": "֏", "thb": "฿", "aed": "AED", "nok": "kr", "cad": "C$", "czk": "Kč", "sek": "kr", "dkk": "kr", "ron": "lei", "uzs": "So'm", "aud": "$", "chf": "₣", "inr": "₹", "bgn": "лв", "ngn": "₦", "huf": "ƒ", "ils": "₪", "try": "₺", "stn": "₡", "ars": "$", "bob": "Bs", "ves": "Bs", "gtq": "Q", "hnl": "L", "dop": "RD$", "cop": "COL$", "crc": "₡", "cup": "$MN", "nio": "C$", "pab": "B/", "pyg": "₲", "pen": "S/", "svc": "₡", "uyu": "$U", "clp": "Ch$", "gel": "₾", "gbp": "£", } // Utils service object. type Utils struct { IsDebug bool TokenCounter uint32 ConfigAWS ConfigAWS Logger LoggerInterface slashRegex *regexp.Regexp } // NewUtils will create new Utils instance. func NewUtils(awsConfig ConfigAWS, logger LoggerInterface, debug bool) *Utils { return &Utils{ IsDebug: debug,
ils(awsConfig ConfigAWS, debug bool, tokenCounter uint32) { u.TokenCounter = tokenCounter u.ConfigAWS = awsConfig u.IsDebug = debug u.slashRegex = slashRegex } // GenerateToken will generate long pseudo-random string. func (u *Utils) GenerateToken() string { c := atomic.AddUint32(&u.TokenCounter, 1) return fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%d%d", time.Now().UnixNano(), c)))) } // GetAPIClient will initialize RetailCRM api client from url and key. func (u *Utils) GetAPIClient(url, key string) (*v5.Client, int, error) { client := v5.New(url, key) client.Debug = u.IsDebug cr, status, e := client.APICredentials() if e != nil && e.Error() != "" { u.Logger.Error(url, status, e.Error(), cr) return nil, http.StatusInternalServerError, errors.New(e.Error()) } if !cr.Success { errMsg := "unknown error" if e != nil { if e.ApiError() != "" { errMsg = e.ApiError() } else if e.ApiErrors() != nil { errMsg = "" for key, errText := range e.ApiErrors() { errMsg += fmt.Sprintf("[%s: %s] ", key, errText) } } } u.Logger.Error(url, status, errMsg, cr) return nil, http.StatusBadRequest, errors.New("invalid credentials") } if res := u.checkCredentials(cr.Credentials); len(res) != 0 { u.Logger.Error(url, status, res) return nil, http.StatusBadRequest, errors.New("missing credentials") } return client, 0, nil } func (u *Utils) checkCredentials(credential []string) []string { rc := make([]string, len(credentialsTransport)) copy(rc, credentialsTransport) for _, vc := range credential { for kn, vn := range rc { if vn == vc { if len(rc) == 1 { rc = rc[:0] break } rc = append(rc[:kn], rc[kn+1:]...) } } } return rc } // UploadUserAvatar will upload avatar for user. func (u *Utils) UploadUserAvatar(url string) (picURLs3 string, err error) { s3Config := &aws.Config{ Credentials: credentials.NewStaticCredentials( u.ConfigAWS.AccessKeyID, u.ConfigAWS.SecretAccessKey, ""), Region: aws.String(u.ConfigAWS.Region), } s := session.Must(session.NewSession(s3Config)) uploader := s3manager.NewUploader(s) // nolint:gosec resp, err := http.Get(url) if err != nil { return } defer resp.Body.Close() if resp.StatusCode >= http.StatusBadRequest { return "", fmt.Errorf("get: %v code: %v", url, resp.StatusCode) } result, err := uploader.Upload(&s3manager.UploadInput{ Bucket: aws.String(u.ConfigAWS.Bucket), Key: aws.String(fmt.Sprintf("%v/%v.jpg", u.ConfigAWS.FolderName, u.GenerateToken())), Body: resp.Body, ContentType: aws.String(u.ConfigAWS.ContentType), ACL: aws.String("public-read"), }) if err != nil { return } picURLs3 = result.Location return } // RemoveTrailingSlash will remove slash at the end of any string. func (u *Utils) RemoveTrailingSlash(crmURL string) string { return u.slashRegex.ReplaceAllString(crmURL, ``) } // GetMGItemData will upload file to MG by URL and return information about attachable item. func GetMGItemData(client *v1.MgClient, url string, caption string) (v1.Item, int, error) { item := v1.Item{} data, st, err := client.UploadFileByURL( v1.UploadFileByUrlRequest{ Url: url, }, ) if err != nil { return item, st, err } item.ID = data.ID item.Caption = caption return item, st, err } // GetEntitySHA1 will serialize any value to JSON and return SHA1 hash of this JSON. func GetEntitySHA1(v interface{}) (hash string, err error) { res, _ := json.Marshal(v) // nolint:gosec h := sha1.New() _, err = h.Write(res) hash = fmt.Sprintf("%x", h.Sum(nil)) return } // ReplaceMarkdownSymbols will remove markdown symbols from text. 
func ReplaceMarkdownSymbols(s string) string { for _, v := range markdownSymbols { s = strings.Replace(s, v, "\\"+v, -1) } return s } // DefaultCurrencies returns the default currency list for all bots. func DefaultCurrencies() map[string]string { return defaultCurrencies } // GetCurrencySymbol returns the currency symbol for an ISO 4217 code. // It returns the provided currency code in uppercase if the currency symbol cannot be found. func GetCurrencySymbol(code string) string { if i, ok := DefaultCurrencies()[strings.ToLower(code)]; ok { return i } return strings.ToUpper(code) }
ConfigAWS: awsConfig, Logger: logger, TokenCounter: 0, slashRegex: slashRegex, } } // resetUtils. func (u *Utils) resetUt
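A standalone sketch of the lookup-or-uppercase fallback implemented by GetCurrencySymbol in this row; the map below is a three-entry excerpt of defaultCurrencies, not the full list:

package main

import (
	"fmt"
	"strings"
)

var currencies = map[string]string{"rub": "₽", "eur": "€", "gbp": "£"}

// symbol mirrors GetCurrencySymbol: known codes map to a symbol,
// unknown codes are echoed back in uppercase.
func symbol(code string) string {
	if s, ok := currencies[strings.ToLower(code)]; ok {
		return s
	}
	return strings.ToUpper(code)
}

func main() {
	fmt.Println(symbol("EUR"), symbol("xyz")) // € XYZ
}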
get_workload_network_dns_service.py
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables __all__ = [ 'GetWorkloadNetworkDnsServiceResult', 'AwaitableGetWorkloadNetworkDnsServiceResult', 'get_workload_network_dns_service', ] @pulumi.output_type class GetWorkloadNetworkDnsServiceResult: """ NSX DNS Service """ def __init__(__self__, default_dns_zone=None, display_name=None, dns_service_ip=None, fqdn_zones=None, id=None, log_level=None, name=None, provisioning_state=None, revision=None, status=None, type=None): if default_dns_zone and not isinstance(default_dns_zone, str): raise TypeError("Expected argument 'default_dns_zone' to be a str") pulumi.set(__self__, "default_dns_zone", default_dns_zone) if display_name and not isinstance(display_name, str): raise TypeError("Expected argument 'display_name' to be a str") pulumi.set(__self__, "display_name", display_name) if dns_service_ip and not isinstance(dns_service_ip, str): raise TypeError("Expected argument 'dns_service_ip' to be a str") pulumi.set(__self__, "dns_service_ip", dns_service_ip) if fqdn_zones and not isinstance(fqdn_zones, list): raise TypeError("Expected argument 'fqdn_zones' to be a list") pulumi.set(__self__, "fqdn_zones", fqdn_zones) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if log_level and not isinstance(log_level, str): raise TypeError("Expected argument 'log_level' to be a str") pulumi.set(__self__, "log_level", log_level) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if provisioning_state and not isinstance(provisioning_state, str): raise TypeError("Expected argument 'provisioning_state' to be a str") pulumi.set(__self__, "provisioning_state", provisioning_state) if revision and not isinstance(revision, float): raise TypeError("Expected argument 'revision' to be a float") pulumi.set(__self__, "revision", revision) if status and not isinstance(status, str): raise TypeError("Expected argument 'status' to be a str") pulumi.set(__self__, "status", status) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter(name="defaultDnsZone") def default_dns_zone(self) -> Optional[str]: """ Default DNS zone of the DNS Service. """ return pulumi.get(self, "default_dns_zone") @property @pulumi.getter(name="displayName") def display_name(self) -> Optional[str]: """ Display name of the DNS Service. """ return pulumi.get(self, "display_name") @property @pulumi.getter(name="dnsServiceIp") def dns_service_ip(self) -> Optional[str]: """ DNS service IP of the DNS Service. """ return pulumi.get(self, "dns_service_ip") @property @pulumi.getter(name="fqdnZones") def fqdn_zones(self) -> Optional[Sequence[str]]: """ FQDN zones of the DNS Service. """ return pulumi.get(self, "fqdn_zones") @property @pulumi.getter def id(self) -> str: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter(name="logLevel") def log_level(self) -> Optional[str]: """ DNS Service log level. """ return pulumi.get(self, "log_level") @property @pulumi.getter def name(self) -> str: """ Resource name. 
""" return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The provisioning state """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def revision(self) -> Optional[float]: """ NSX revision number. """ return pulumi.get(self, "revision") @property @pulumi.getter def
(self) -> str: """ DNS Service status. """ return pulumi.get(self, "status") @property @pulumi.getter def type(self) -> str: """ Resource type. """ return pulumi.get(self, "type") class AwaitableGetWorkloadNetworkDnsServiceResult(GetWorkloadNetworkDnsServiceResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetWorkloadNetworkDnsServiceResult( default_dns_zone=self.default_dns_zone, display_name=self.display_name, dns_service_ip=self.dns_service_ip, fqdn_zones=self.fqdn_zones, id=self.id, log_level=self.log_level, name=self.name, provisioning_state=self.provisioning_state, revision=self.revision, status=self.status, type=self.type) def get_workload_network_dns_service(dns_service_id: Optional[str] = None, private_cloud_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkloadNetworkDnsServiceResult: """ NSX DNS Service :param str dns_service_id: NSX DNS Service identifier. Generally the same as the DNS Service's display name :param str private_cloud_name: Name of the private cloud :param str resource_group_name: The name of the resource group. The name is case insensitive. """ __args__ = dict() __args__['dnsServiceId'] = dns_service_id __args__['privateCloudName'] = private_cloud_name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:avs/v20210101preview:getWorkloadNetworkDnsService', __args__, opts=opts, typ=GetWorkloadNetworkDnsServiceResult).value return AwaitableGetWorkloadNetworkDnsServiceResult( default_dns_zone=__ret__.default_dns_zone, display_name=__ret__.display_name, dns_service_ip=__ret__.dns_service_ip, fqdn_zones=__ret__.fqdn_zones, id=__ret__.id, log_level=__ret__.log_level, name=__ret__.name, provisioning_state=__ret__.provisioning_state, revision=__ret__.revision, status=__ret__.status, type=__ret__.type)
status
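A minimal usage sketch for the invoke function defined above; it is not part of the generated file, and the resource names are placeholder values. Inside a Pulumi program (where this module is imported and the runtime is available), the awaitable result exposes the typed getters declared above:

# Hypothetical example values; replace with real Azure resource names.
dns = get_workload_network_dns_service(
    dns_service_id="dnsService1",
    private_cloud_name="myPrivateCloud",
    resource_group_name="myResourceGroup")

# Read the typed properties from the result and export them from the stack.
pulumi.export("dnsServiceIp", dns.dns_service_ip)
pulumi.export("provisioningState", dns.provisioning_state)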
connection.go
package uaa import "net/http" //go:generate counterfeiter . Connection
Make(request *http.Request, passedResponse *Response) error }
// Connection creates and executes http requests type Connection interface {
statreceiver.go
/* Copyright 2013 The Perkeep AUTHORS Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package stats contains an in-memory StatReceiver that only stores sizes // of received blobs but not their contents. package stats // import "perkeep.org/pkg/blobserver/stats" import ( "context" "io" "io/ioutil" "sort" "sync" "perkeep.org/pkg/blob" ) // Receiver is a dummy blobserver.StatReceiver that doesn't // store anything; it just collects statistics. // // TODO: we have another copy of this same type in // camput/files.go. move them to a common place? well, the camput one // is probably going away at some point. type Receiver struct { sync.Mutex // guards Have Have map[blob.Ref]int64 } func (sr *Receiver) NumBlobs() int { sr.Lock() defer sr.Unlock() return len(sr.Have) } // Sizes returns the sorted blob sizes. func (sr *Receiver) Sizes() []int { sr.Lock() defer sr.Unlock() sizes := make([]int, 0, len(sr.Have)) for _, size := range sr.Have { sizes = append(sizes, int(size)) } sort.Ints(sizes) return sizes } func (sr *Receiver) SumBlobSize() int64 { sr.Lock() defer sr.Unlock() var sum int64 for _, v := range sr.Have { sum += v } return sum } func (sr *Receiver) ReceiveBlob(ctx context.Context, br blob.Ref, source io.Reader) (sb blob.SizedRef, err error) { n, err := io.Copy(ioutil.Discard, source) if err != nil { return } return sr.ReceiveRef(br, n) } func (sr *Receiver) ReceiveRef(br blob.Ref, size int64) (sb blob.SizedRef, err error) { sr.Lock() defer sr.Unlock() if sr.Have == nil { sr.Have = make(map[blob.Ref]int64) } sr.Have[br] = size return blob.SizedRef{br, uint32(size)}, nil } func (sr *Receiver) StatBlobs(ctx context.Context, blobs []blob.Ref, fn func(blob.SizedRef) error) error { var sized []blob.SizedRef sr.Lock() for _, br := range blobs { if size, ok := sr.Have[br]; ok { sized = append(sized, blob.SizedRef{br, uint32(size)}) } } sr.Unlock() // Call fn with no locks held: for _, sb := range sized { if err := fn(sb); err != nil { return err } } return nil } func (sr *Receiver) RemoveBlobs(ctx context.Context, blobs []blob.Ref) error { sr.Lock() defer sr.Unlock() for _, br := range blobs { delete(sr.Have, br) } return nil } func (sr *Receiver) EnumerateBlobs(ctx context.Context, dest chan<- blob.SizedRef, after string, limit int) error { sr.Lock() defer sr.Unlock() defer close(dest) refs := blob.SizedByRef{} for ref, size := range sr.Have { if after != "" && ref.String() <= after { continue } refs = append(refs, blob.SizedRef{Ref: ref, Size: uint32(size)}) } sort.Sort(refs) if len(refs) == 0 { return nil } if len(refs) <= limit { limit = len(refs) } for _, sb := range refs[:limit] {
return nil }
dest <- sb }
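For readers less familiar with Go, here is a rough Python sketch (illustrative only, not part of Perkeep) of the same idea as Receiver above: keep only blob sizes under a lock, and enumerate them sorted by ref while honouring the after/limit pagination used by EnumerateBlobs.

import threading

class StatsReceiver:
    """Size-only blob bookkeeping, loosely mirroring the Go Receiver above."""

    def __init__(self):
        self._lock = threading.Lock()
        self._have = {}                      # ref (str) -> size (int)

    def receive(self, ref, data):
        # Record the size and discard the content, like ReceiveBlob/ReceiveRef.
        with self._lock:
            self._have[ref] = len(data)
        return ref, len(data)

    def enumerate(self, after="", limit=1000):
        # Sorted by ref, skipping everything <= `after`, capped at `limit`.
        with self._lock:
            refs = sorted(r for r in self._have if r > after)
            return [(r, self._have[r]) for r in refs[:limit]]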
randomx_factory.rs
use crate::proof_of_work::monero_rx::MergeMineError; use log::*; use randomx_rs::{RandomXCache, RandomXDataset, RandomXError, RandomXFlag, RandomXVM}; use std::{ collections::HashMap, sync::{Arc, Mutex, RwLock}, time::Instant, }; const LOG_TARGET: &str = "c::pow::randomx_factory"; #[derive(Clone)] pub struct RandomXVMInstance { // Note: If a cache and dataset (if assigned) allocated to the VM drops, the VM will crash. // The cache and dataset for the VM need to be stored together with it since they are not // mix and match. instance: Arc<Mutex<(RandomXVM, RandomXCache, Option<RandomXDataset>)>>, _flags: RandomXFlag, } impl RandomXVMInstance { fn create(key: &[u8], flags: RandomXFlag) -> Result<Self, RandomXError> { let (flags, cache) = match RandomXCache::new(flags, key) { Ok(cache) => (flags, cache), Err(err) => { warn!( target: LOG_TARGET, "Error initializing randomx cache with flags {:?}. {:?}. Fallback to default flags", flags, err ); // This is informed by how RandomX falls back on any cache allocation failure // https://github.com/xmrig/xmrig/blob/02b2b87bb685ab83b132267aa3c2de0766f16b8b/src/crypto/rx/RxCache.cpp#L88 let flags = RandomXFlag::FLAG_DEFAULT; let cache = RandomXCache::new(flags, key)?; (flags, cache) }, }; // Note: Memory required per VM in light mode is 256MB let vm = RandomXVM::new(flags, Some(&cache), None)?; // Note: No dataset is initialized here because we want to run in light mode. Only a cache // is required by the VM for verification, giving it a dataset will only make the VM // consume more memory than necessary. Dataset is currently an optional value as it may be // useful at some point in future. // Note: RandomXFlag::FULL_MEM and RandomXFlag::LARGE_PAGES are incompatible with // light mode. These are not set by RandomX automatically even in fast mode. 
Ok(Self { instance: Arc::new(Mutex::new((vm, cache, None))), _flags: flags, }) } pub fn calculate_hash(&self, input: &[u8]) -> Result<Vec<u8>, RandomXError> { self.instance.lock().unwrap().0.calculate_hash(input) } } unsafe impl Send for RandomXVMInstance {} unsafe impl Sync for RandomXVMInstance {} // Thread safe impl of the inner impl #[derive(Clone)] pub struct RandomXFactory { inner: Arc<RwLock<RandomXFactoryInner>>, } impl Default for RandomXFactory { fn default() -> Self { Self::new(2) } } impl RandomXFactory { pub fn new(max_vms: usize) -> Self { Self { inner: Arc::new(RwLock::new(RandomXFactoryInner::new(max_vms))), } } pub fn create(&self, key: &[u8]) -> Result<RandomXVMInstance, MergeMineError> { let res; { let mut inner = self.inner.write().unwrap(); res = inner.create(key)?; } Ok(res) } pub fn get_count(&self) -> usize { let inner = self.inner.read().unwrap(); inner.get_count() } pub fn get_flags(&self) -> RandomXFlag { let inner = self.inner.read().unwrap(); inner.get_flags() } } struct RandomXFactoryInner { flags: RandomXFlag, vms: HashMap<Vec<u8>, (Instant, RandomXVMInstance)>, max_vms: usize, } impl RandomXFactoryInner { pub fn new(max_vms: usize) -> Self { let flags = RandomXFlag::get_recommended_flags(); debug!( target: LOG_TARGET, "RandomX factory started with {} max VMs and recommended flags = {:?}", max_vms, flags ); Self { flags, vms: Default::default(), max_vms, } } pub fn create(&mut self, key: &[u8]) -> Result<RandomXVMInstance, MergeMineError> { if let Some(entry) = self.vms.get_mut(key) { let vm = entry.1.clone(); entry.0 = Instant::now(); return Ok(vm); } if self.vms.len() >= self.max_vms { let mut oldest_value = Instant::now(); let mut oldest_key = None; for (k, v) in self.vms.iter() { if v.0 < oldest_value { oldest_key = Some(k.clone()); oldest_value = v.0; } } if let Some(k) = oldest_key
} let vm = RandomXVMInstance::create(key, self.flags)?; self.vms.insert(Vec::from(key), (Instant::now(), vm.clone())); Ok(vm) } pub fn get_count(&self) -> usize { self.vms.len() } pub fn get_flags(&self) -> RandomXFlag { self.flags } } #[cfg(test)] mod test { use super::*; #[test] fn basic_initialization_and_hash() { let factory = RandomXFactory::new(2); let key = b"some-key"; let vm = factory.create(&key[..]).unwrap(); let preimage = b"hashme"; let hash1 = vm.calculate_hash(&preimage[..]).unwrap(); let vm = factory.create(&key[..]).unwrap(); assert_eq!(vm.calculate_hash(&preimage[..]).unwrap(), hash1); let key = b"another-key"; let vm = factory.create(&key[..]).unwrap(); assert_ne!(vm.calculate_hash(&preimage[..]).unwrap(), hash1); } }
{ self.vms.remove(&k); }
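The per-key VM cache with oldest-entry eviction in RandomXFactoryInner::create can be summarized in a language-neutral way; below is a compact Python sketch of that strategy (illustrative only, not part of the Rust crate), where `factory` stands in for RandomXVMInstance::create.

import time

class KeyedInstanceCache:
    """Reuse one instance per key; evict the least recently used entry
    once max_vms is reached (mirrors RandomXFactoryInner::create above)."""

    def __init__(self, factory, max_vms=2):
        self.factory = factory               # callable: key -> instance
        self.max_vms = max_vms
        self.vms = {}                        # key -> [last_used, instance]

    def create(self, key):
        if key in self.vms:
            entry = self.vms[key]
            entry[0] = time.monotonic()      # refresh the timestamp on reuse
            return entry[1]
        if len(self.vms) >= self.max_vms:
            oldest = min(self.vms, key=lambda k: self.vms[k][0])
            del self.vms[oldest]             # drop the stalest instance first
        instance = self.factory(key)
        self.vms[key] = [time.monotonic(), instance]
        return instance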
discrete_model.py
""" Limited dependent variable and qualitative variables. Includes binary outcomes, count data, (ordered) ordinal data and limited dependent variables. General References -------------------- A.C. Cameron and P.K. Trivedi. `Regression Analysis of Count Data`. Cambridge, 1998 G.S. Madalla. `Limited-Dependent and Qualitative Variables in Econometrics`. Cambridge, 1983. W. Greene. `Econometric Analysis`. Prentice Hall, 5th. edition. 2003. """ from __future__ import division __all__ = ["Poisson", "Logit", "Probit", "MNLogit", "NegativeBinomial"] from statsmodels.compat.python import lmap, lzip, range import numpy as np from scipy.special import gammaln from scipy import stats, special, optimize # opt just for nbin import statsmodels.tools.tools as tools from statsmodels.tools import data as data_tools from statsmodels.tools.decorators import (resettable_cache, cache_readonly) from statsmodels.regression.linear_model import OLS from scipy import stats, special, optimize # opt just for nbin from scipy.stats import nbinom from statsmodels.tools.sm_exceptions import PerfectSeparationError from statsmodels.tools.numdiff import (approx_fprime, approx_hess, approx_hess_cs, approx_fprime_cs) import statsmodels.base.model as base from statsmodels.base.data import handle_data # for mnlogit import statsmodels.regression.linear_model as lm import statsmodels.base.wrapper as wrap from statsmodels.compat.numpy import np_matrix_rank from pandas.core.api import get_dummies from statsmodels.base.l1_slsqp import fit_l1_slsqp try: import cvxopt have_cvxopt = True except ImportError: have_cvxopt = False #TODO: When we eventually get user-settable precision, we need to change # this FLOAT_EPS = np.finfo(float).eps #TODO: add options for the parameter covariance/variance # ie., OIM, EIM, and BHHH see Green 21.4 _discrete_models_docs = """ """ _discrete_results_docs = """ %(one_line_description)s Parameters ---------- model : A DiscreteModel instance params : array-like The parameters of a fitted model. hessian : array-like The hessian of the fitted model. scale : float A scale parameter for the covariance matrix. Returns ------- *Attributes* aic : float Akaike information criterion. `-2*(llf - p)` where `p` is the number of regressors including the intercept. bic : float Bayesian information criterion. `-2*llf + ln(nobs)*p` where `p` is the number of regressors including the intercept. bse : array The standard errors of the coefficients. df_resid : float See model definition. df_model : float See model definition. fitted_values : array Linear predictor XB. llf : float Value of the loglikelihood llnull : float Value of the constant-only loglikelihood llr : float Likelihood ratio chi-squared statistic; `-2*(llnull - llf)` llr_pvalue : float The chi-squared probability of getting a log-likelihood ratio statistic greater than llr. llr has a chi-squared distribution with degrees of freedom `df_model`. prsquared : float McFadden's pseudo-R-squared. `1 - (llf / llnull)` %(extra_attr)s""" _l1_results_attr = """ nnz_params : Integer The number of nonzero parameters in the model. Train with trim_params == True or else numerical error will distort this. 
trimmed : Boolean array trimmed[i] == True if the ith parameter was trimmed from the model.""" # helper for MNLogit (will be generally useful later) def _numpy_to_dummies(endog): if endog.dtype.kind in ['S', 'O']: endog_dummies, ynames = tools.categorical(endog, drop=True, dictnames=True) elif endog.ndim == 2: endog_dummies = endog ynames = range(endog.shape[1]) else: endog_dummies, ynames = tools.categorical(endog, drop=True, dictnames=True) return endog_dummies, ynames def _pandas_to_dummies(endog): if endog.ndim == 2: if endog.shape[1] == 1: yname = endog.columns[0] endog_dummies = get_dummies(endog.iloc[:, 0]) else: # series yname = 'y' endog_dummies = endog else: yname = endog.name endog_dummies = get_dummies(endog) ynames = endog_dummies.columns.tolist() return endog_dummies, ynames, yname #### Private Model Classes #### class DiscreteModel(base.LikelihoodModel): """ Abstract class for discrete choice models. This class does not do anything itself but lays out the methods and call signature expected of child classes in addition to those of statsmodels.model.LikelihoodModel. """ def __init__(self, endog, exog, **kwargs): super(DiscreteModel, self).__init__(endog, exog, **kwargs) self.raise_on_perfect_prediction = True def initialize(self): """ Initialize is called by statsmodels.model.LikelihoodModel.__init__ and should contain any preprocessing that needs to be done for a model. """ # assumes constant self.df_model = float(np_matrix_rank(self.exog) - 1) self.df_resid = (float(self.exog.shape[0] - np_matrix_rank(self.exog))) def cdf(self, X): """ The cumulative distribution function of the model. """ raise NotImplementedError def pdf(self, X): """ The probability density (mass) function of the model. """ raise NotImplementedError def _check_perfect_pred(self, params, *args): endog = self.endog fittedvalues = self.cdf(np.dot(self.exog, params[:self.exog.shape[1]])) if (self.raise_on_perfect_prediction and np.allclose(fittedvalues - endog, 0)): msg = "Perfect separation detected, results not available" raise PerfectSeparationError(msg) def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): """ Fit the model using maximum likelihood. The rest of the docstring is from statsmodels.base.model.LikelihoodModel.fit """ if callback is None: callback = self._check_perfect_pred else: pass # make a function factory to have multiple call-backs mlefit = super(DiscreteModel, self).fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) return mlefit # up to subclasses to wrap results fit.__doc__ += base.LikelihoodModel.fit.__doc__ def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=True, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, qc_verbose=False, **kwargs): """ Fit the model using a regularized maximum likelihood. The regularization method AND the solver used is determined by the argument method. Parameters ---------- start_params : array-like, optional Initial guess of the solution for the loglikelihood maximization. The default is an array of zeros. method : 'l1' or 'l1_cvxopt_cp' See notes for details. maxiter : Integer or 'defined_by_method' Maximum number of iterations to perform. If 'defined_by_method', then use method defaults (see notes). full_output : bool Set to True to have all available output in the Results object's mle_retvals attribute. 
The output is dependent on the solver. See LikelihoodModelResults notes section for more information. disp : bool Set to True to print convergence messages. fargs : tuple Extra arguments passed to the likelihood function, i.e., loglike(x,*args) callback : callable callback(xk) Called after each iteration, as callback(xk), where xk is the current parameter vector. retall : bool Set to True to return list of solutions at each iteration. Available in Results object's mle_retvals attribute. alpha : non-negative scalar or numpy array (same size as parameters) The weight multiplying the l1 penalty term trim_mode : 'auto, 'size', or 'off' If not 'off', trim (set to zero) parameters that would have been zero if the solver reached the theoretical minimum. If 'auto', trim params using the Theory above. If 'size', trim params if they have very small absolute value size_trim_tol : float or 'auto' (default = 'auto') For use when trim_mode == 'size' auto_trim_tol : float For sue when trim_mode == 'auto'. Use qc_tol : float Print warning and don't allow auto trim when (ii) (above) is violated by this much. qc_verbose : Boolean If true, print out a full QC report upon failure Notes ----- Extra parameters are not penalized if alpha is given as a scalar. An example is the shape parameter in NegativeBinomial `nb1` and `nb2`. Optional arguments for the solvers (available in Results.mle_settings):: 'l1' acc : float (default 1e-6) Requested accuracy as used by slsqp 'l1_cvxopt_cp' abstol : float absolute accuracy (default: 1e-7). reltol : float relative accuracy (default: 1e-6). feastol : float tolerance for feasibility conditions (default: 1e-7). refinement : int number of iterative refinement steps when solving KKT equations (default: 1). Optimization methodology With :math:`L` the negative log likelihood, we solve the convex but non-smooth problem .. math:: \\min_\\beta L(\\beta) + \\sum_k\\alpha_k |\\beta_k| via the transformation to the smooth, convex, constrained problem in twice as many variables (adding the "added variables" :math:`u_k`) .. math:: \\min_{\\beta,u} L(\\beta) + \\sum_k\\alpha_k u_k, subject to .. math:: -u_k \\leq \\beta_k \\leq u_k. With :math:`\\partial_k L` the derivative of :math:`L` in the :math:`k^{th}` parameter direction, theory dictates that, at the minimum, exactly one of two conditions holds: (i) :math:`|\\partial_k L| = \\alpha_k` and :math:`\\beta_k \\neq 0` (ii) :math:`|\\partial_k L| \\leq \\alpha_k` and :math:`\\beta_k = 0` """ ### Set attributes based on method if method in ['l1', 'l1_cvxopt_cp']: cov_params_func = self.cov_params_func_l1 else: raise Exception("argument method == %s, which is not handled" % method) ### Bundle up extra kwargs for the dictionary kwargs. These are ### passed through super(...).fit() as kwargs and unpacked at ### appropriate times alpha = np.array(alpha) assert alpha.min() >= 0 try: kwargs['alpha'] = alpha except TypeError: kwargs = dict(alpha=alpha) kwargs['alpha_rescaled'] = kwargs['alpha'] / float(self.endog.shape[0]) kwargs['trim_mode'] = trim_mode kwargs['size_trim_tol'] = size_trim_tol kwargs['auto_trim_tol'] = auto_trim_tol kwargs['qc_tol'] = qc_tol kwargs['qc_verbose'] = qc_verbose ### Define default keyword arguments to be passed to super(...).fit() if maxiter == 'defined_by_method': if method == 'l1': maxiter = 1000 elif method == 'l1_cvxopt_cp': maxiter = 70 ## Parameters to pass to super(...).fit() # For the 'extra' parameters, pass all that are available, # even if we know (at this point) we will only use one. 
extra_fit_funcs = {'l1': fit_l1_slsqp} if have_cvxopt and method == 'l1_cvxopt_cp': from statsmodels.base.l1_cvxopt import fit_l1_cvxopt_cp extra_fit_funcs['l1_cvxopt_cp'] = fit_l1_cvxopt_cp elif method.lower() == 'l1_cvxopt_cp': message = ("Attempt to use l1_cvxopt_cp failed since cvxopt " "could not be imported") if callback is None: callback = self._check_perfect_pred else: pass # make a function factory to have multiple call-backs mlefit = super(DiscreteModel, self).fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, extra_fit_funcs=extra_fit_funcs, cov_params_func=cov_params_func, **kwargs) return mlefit # up to subclasses to wrap results def cov_params_func_l1(self, likelihood_model, xopt, retvals): """ Computes cov_params on a reduced parameter space corresponding to the nonzero parameters resulting from the l1 regularized fit. Returns a full cov_params matrix, with entries corresponding to zero'd values set to np.nan. """ H = likelihood_model.hessian(xopt) trimmed = retvals['trimmed'] nz_idx = np.nonzero(trimmed == False)[0] nnz_params = (trimmed == False).sum() if nnz_params > 0: H_restricted = H[nz_idx[:, None], nz_idx] # Covariance estimate for the nonzero params H_restricted_inv = np.linalg.inv(-H_restricted) else: H_restricted_inv = np.zeros(0) cov_params = np.nan * np.ones(H.shape) cov_params[nz_idx[:, None], nz_idx] = H_restricted_inv return cov_params def predict(self, params, exog=None, linear=False): """ Predict response variable of a model given exogenous variables. """ raise NotImplementedError def _derivative_exog(self, params, exog=None, dummy_idx=None, count_idx=None): """ This should implement the derivative of the non-linear function """ raise NotImplementedError class BinaryModel(DiscreteModel): def __init__(self, endog, exog, **kwargs): super(BinaryModel, self).__init__(endog, exog, **kwargs) if (not issubclass(self.__class__, MultinomialModel) and not np.all((self.endog >= 0) & (self.endog <= 1))): raise ValueError("endog must be in the unit interval.") def predict(self, params, exog=None, linear=False): """ Predict response variable of a model given exogenous variables. Parameters ---------- params : array-like Fitted parameters of the model. exog : array-like 1d or 2d array of exogenous values. If not supplied, the whole exog attribute of the model is used. linear : bool, optional If True, returns the linear predictor dot(exog,params). Else, returns the value of the cdf at the linear predictor. Returns ------- array Fitted values at exog. 
""" if exog is None: exog = self.exog if not linear: return self.cdf(np.dot(exog, params)) else: return np.dot(exog, params) def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=1, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, **kwargs): bnryfit = super(BinaryModel, self).fit_regularized( start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs) if method in ['l1', 'l1_cvxopt_cp']: discretefit = L1BinaryResults(self, bnryfit) else: raise Exception( "argument method == %s, which is not handled" % method) return L1BinaryResultsWrapper(discretefit) fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__ def _derivative_predict(self, params, exog=None, transform='dydx'): """ For computing marginal effects standard errors. This is used only in the case of discrete and count regressors to get the variance-covariance of the marginal effects. It returns [d F / d params] where F is the predict. Transform can be 'dydx' or 'eydx'. Checking is done in margeff computations for appropriate transform. """ if exog is None: exog = self.exog dF = self.pdf(np.dot(exog, params))[:,None] * exog if 'ey' in transform: dF /= self.predict(params, exog)[:,None] return dF def _derivative_exog(self, params, exog=None, transform='dydx', dummy_idx=None, count_idx=None): """ For computing marginal effects returns dF(XB) / dX where F(.) is the predicted probabilities transform can be 'dydx', 'dyex', 'eydx', or 'eyex'. Not all of these make sense in the presence of discrete regressors, but checks are done in the results in get_margeff. """ #note, this form should be appropriate for ## group 1 probit, logit, logistic, cloglog, heckprob, xtprobit if exog is None: exog = self.exog margeff = np.dot(self.pdf(np.dot(exog, params))[:,None], params[None,:]) if 'ex' in transform: margeff *= exog if 'ey' in transform: margeff /= self.predict(params, exog)[:,None] if count_idx is not None: from statsmodels.discrete.discrete_margins import ( _get_count_effects) margeff = _get_count_effects(margeff, exog, count_idx, transform, self, params) if dummy_idx is not None: from statsmodels.discrete.discrete_margins import ( _get_dummy_effects) margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform, self, params) return margeff class MultinomialModel(BinaryModel): def _handle_data(self, endog, exog, missing, hasconst, **kwargs): if data_tools._is_using_ndarray_type(endog, None): endog_dummies, ynames = _numpy_to_dummies(endog) yname = 'y' elif data_tools._is_using_pandas(endog, None): endog_dummies, ynames, yname = _pandas_to_dummies(endog) else: endog = np.asarray(endog) endog_dummies, ynames = _numpy_to_dummies(endog) yname = 'y' if not isinstance(ynames, dict): ynames = dict(zip(range(endog_dummies.shape[1]), ynames)) self._ynames_map = ynames data = handle_data(endog_dummies, exog, missing, hasconst, **kwargs) data.ynames = yname # overwrite this to single endog name data.orig_endog = endog self.wendog = data.endog # repeating from upstream... for key in kwargs: try: setattr(self, key, data.__dict__.pop(key)) except KeyError: pass return data def initialize(self): """ Preprocesses the data for MNLogit. 
""" super(MultinomialModel, self).initialize() # This is also a "whiten" method in other models (eg regression) self.endog = self.endog.argmax(1) # turn it into an array of col idx self.J = self.wendog.shape[1] self.K = self.exog.shape[1] self.df_model *= (self.J-1) # for each J - 1 equation. self.df_resid = self.exog.shape[0] - self.df_model - (self.J-1) def predict(self, params, exog=None, linear=False): """ Predict response variable of a model given exogenous variables. Parameters ---------- params : array-like 2d array of fitted parameters of the model. Should be in the order returned from the model. exog : array-like 1d or 2d array of exogenous values. If not supplied, the whole exog attribute of the model is used. If a 1d array is given it assumed to be 1 row of exogenous variables. If you only have one regressor and would like to do prediction, you must provide a 2d array with shape[1] == 1. linear : bool, optional If True, returns the linear predictor dot(exog,params). Else, returns the value of the cdf at the linear predictor. Notes ----- Column 0 is the base case, the rest conform to the rows of params shifted up one for the base case. """ if exog is None: # do here to accomodate user-given exog exog = self.exog if exog.ndim == 1: exog = exog[None] pred = super(MultinomialModel, self).predict(params, exog, linear) if linear: pred = np.column_stack((np.zeros(len(exog)), pred)) return pred def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): if start_params is None: start_params = np.zeros((self.K * (self.J-1))) else: start_params = np.asarray(start_params) callback = lambda x : None # placeholder until check_perfect_pred # skip calling super to handle results from LikelihoodModel mnfit = base.LikelihoodModel.fit(self, start_params = start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) mnfit.params = mnfit.params.reshape(self.K, -1, order='F') mnfit = MultinomialResults(self, mnfit) return MultinomialResultsWrapper(mnfit) fit.__doc__ = DiscreteModel.fit.__doc__ def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=1, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, **kwargs): if start_params is None: start_params = np.zeros((self.K * (self.J-1))) else: start_params = np.asarray(start_params) mnfit = DiscreteModel.fit_regularized( self, start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs) mnfit.params = mnfit.params.reshape(self.K, -1, order='F') mnfit = L1MultinomialResults(self, mnfit) return L1MultinomialResultsWrapper(mnfit) fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__ def _derivative_predict(self, params, exog=None, transform='dydx'): """ For computing marginal effects standard errors. This is used only in the case of discrete and count regressors to get the variance-covariance of the marginal effects. It returns [d F / d params] where F is the predicted probabilities for each choice. dFdparams is of shape nobs x (J*K) x (J-1)*K. The zero derivatives for the base category are not included. Transform can be 'dydx' or 'eydx'. Checking is done in margeff computations for appropriate transform. 
""" if exog is None: exog = self.exog if params.ndim == 1: # will get flatted from approx_fprime params = params.reshape(self.K, self.J-1, order='F') eXB = np.exp(np.dot(exog, params)) sum_eXB = (1 + eXB.sum(1))[:,None] J, K = lmap(int, [self.J, self.K]) repeat_eXB = np.repeat(eXB, J, axis=1) X = np.tile(exog, J-1) # this is the derivative wrt the base level F0 = -repeat_eXB * X / sum_eXB ** 2 # this is the derivative wrt the other levels when # dF_j / dParams_j (ie., own equation) #NOTE: this computes too much, any easy way to cut down? F1 = eXB.T[:,:,None]*X * (sum_eXB - repeat_eXB) / (sum_eXB**2) F1 = F1.transpose((1,0,2)) # put the nobs index first # other equation index other_idx = ~np.kron(np.eye(J-1), np.ones(K)).astype(bool) F1[:, other_idx] = (-eXB.T[:,:,None]*X*repeat_eXB / \ (sum_eXB**2)).transpose((1,0,2))[:, other_idx] dFdX = np.concatenate((F0[:, None,:], F1), axis=1) if 'ey' in transform: dFdX /= self.predict(params, exog)[:, :, None] return dFdX def _derivative_exog(self, params, exog=None, transform='dydx', dummy_idx=None, count_idx=None): """ For computing marginal effects returns dF(XB) / dX where F(.) is the predicted probabilities transform can be 'dydx', 'dyex', 'eydx', or 'eyex'. Not all of these make sense in the presence of discrete regressors, but checks are done in the results in get_margeff. For Multinomial models the marginal effects are P[j] * (params[j] - sum_k P[k]*params[k]) It is returned unshaped, so that each row contains each of the J equations. This makes it easier to take derivatives of this for standard errors. If you want average marginal effects you can do margeff.reshape(nobs, K, J, order='F).mean(0) and the marginal effects for choice J are in column J """ J = int(self.J) # number of alternative choices K = int(self.K) # number of variables #note, this form should be appropriate for ## group 1 probit, logit, logistic, cloglog, heckprob, xtprobit if exog is None: exog = self.exog if params.ndim == 1: # will get flatted from approx_fprime params = params.reshape(K, J-1, order='F') zeroparams = np.c_[np.zeros(K), params] # add base in cdf = self.cdf(np.dot(exog, params)) margeff = np.array([cdf[:,[j]]* (zeroparams[:,j]-np.array([cdf[:,[i]]* zeroparams[:,i] for i in range(int(J))]).sum(0)) for j in range(J)]) margeff = np.transpose(margeff, (1,2,0)) # swap the axes to make sure margeff are in order nobs, K, J if 'ex' in transform: margeff *= exog if 'ey' in transform: margeff /= self.predict(params, exog)[:,None,:] if count_idx is not None: from statsmodels.discrete.discrete_margins import ( _get_count_effects) margeff = _get_count_effects(margeff, exog, count_idx, transform, self, params) if dummy_idx is not None: from statsmodels.discrete.discrete_margins import ( _get_dummy_effects) margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform, self, params) return margeff.reshape(len(exog), -1, order='F') class CountModel(DiscreteModel): def __init__(self, endog, exog, offset=None, exposure=None, missing='none', **kwargs): super(CountModel, self).__init__(endog, exog, missing=missing, offset=offset, exposure=exposure, **kwargs) if exposure is not None: self.exposure = np.log(self.exposure) self._check_inputs(self.offset, self.exposure, self.endog) if offset is None: delattr(self, 'offset') if exposure is None: delattr(self, 'exposure') def _check_inputs(self, offset, exposure, endog): if offset is not None and offset.shape[0] != endog.shape[0]: raise ValueError("offset is not the same length as endog") if exposure is not None and 
exposure.shape[0] != endog.shape[0]: raise ValueError("exposure is not the same length as endog") def _get_init_kwds(self): # this is a temporary fixup because exposure has been transformed # see #1609 kwds = super(CountModel, self)._get_init_kwds() if 'exposure' in kwds and kwds['exposure'] is not None: kwds['exposure'] = np.exp(kwds['exposure']) return kwds def predict(self, params, exog=None, exposure=None, offset=None, linear=False): """ Predict response variable of a count model given exogenous variables. Notes ----- If exposure is specified, then it will be logged by the method. The user does not need to log it first. """ #TODO: add offset tp if exog is None: exog = self.exog offset = getattr(self, 'offset', 0) exposure = getattr(self, 'exposure', 0) else: if exposure is None: exposure = 0 else: exposure = np.log(exposure) if offset is None: offset = 0 if not linear: return np.exp(np.dot(exog, params[:exog.shape[1]]) + exposure + offset) # not cdf else: return np.dot(exog, params[:exog.shape[1]]) + exposure + offset def _derivative_predict(self, params, exog=None, transform='dydx'): """ For computing marginal effects standard errors. This is used only in the case of discrete and count regressors to get the variance-covariance of the marginal effects. It returns [d F / d params] where F is the predict. Transform can be 'dydx' or 'eydx'. Checking is done in margeff computations for appropriate transform. """ if exog is None: exog = self.exog #NOTE: this handles offset and exposure dF = self.predict(params, exog)[:,None] * exog if 'ey' in transform: dF /= self.predict(params, exog)[:,None] return dF def _derivative_exog(self, params, exog=None, transform="dydx", dummy_idx=None, count_idx=None): """ For computing marginal effects. These are the marginal effects d F(XB) / dX For the Poisson model F(XB) is the predicted counts rather than the probabilities. transform can be 'dydx', 'dyex', 'eydx', or 'eyex'. Not all of these make sense in the presence of discrete regressors, but checks are done in the results in get_margeff. 
""" # group 3 poisson, nbreg, zip, zinb if exog is None: exog = self.exog margeff = self.predict(params, exog)[:,None] * params[None,:] if 'ex' in transform: margeff *= exog if 'ey' in transform: margeff /= self.predict(params, exog)[:,None] if count_idx is not None: from statsmodels.discrete.discrete_margins import ( _get_count_effects) margeff = _get_count_effects(margeff, exog, count_idx, transform, self, params) if dummy_idx is not None: from statsmodels.discrete.discrete_margins import ( _get_dummy_effects) margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform, self, params) return margeff def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): cntfit = super(CountModel, self).fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) discretefit = CountResults(self, cntfit) return CountResultsWrapper(discretefit) fit.__doc__ = DiscreteModel.fit.__doc__ def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=1, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, **kwargs): cntfit = super(CountModel, self).fit_regularized( start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs) if method in ['l1', 'l1_cvxopt_cp']: discretefit = L1CountResults(self, cntfit) else: raise Exception( "argument method == %s, which is not handled" % method) return L1CountResultsWrapper(discretefit) fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__ class OrderedModel(DiscreteModel): pass #### Public Model Classes #### class Poisson(CountModel): __doc__ = """ Poisson model for count data %(params)s %(extra_params)s Attributes ----------- endog : array A reference to the endogenous response variable exog : array A reference to the exogenous design. """ % {'params' : base._model_params_doc, 'extra_params' : """offset : array_like Offset is added to the linear prediction with coefficient equal to 1. exposure : array_like Log(exposure) is added to the linear prediction with coefficient equal to 1. """ + base._missing_param_doc} def cdf(self, X): """ Poisson model cumulative distribution function Parameters ----------- X : array-like `X` is the linear predictor of the model. See notes. Returns ------- The value of the Poisson CDF at each point. Notes ----- The CDF is defined as .. math:: \\exp\\left(-\\lambda\\right)\\sum_{i=0}^{y}\\frac{\\lambda^{i}}{i!} where :math:`\\lambda` assumes the loglinear model. I.e., .. math:: \\ln\\lambda_{i}=X\\beta The parameter `X` is :math:`X\\beta` in the above formula. """ y = self.endog return stats.poisson.cdf(y, np.exp(X)) def pdf(self, X): """ Poisson model probability mass function Parameters ----------- X : array-like `X` is the linear predictor of the model. See notes. Returns ------- pdf : ndarray The value of the Poisson probability mass function, PMF, for each point of X. Notes -------- The PMF is defined as .. math:: \\frac{e^{-\\lambda_{i}}\\lambda_{i}^{y_{i}}}{y_{i}!} where :math:`\\lambda` assumes the loglinear model. I.e., .. math:: \\ln\\lambda_{i}=x_{i}\\beta The parameter `X` is :math:`x_{i}\\beta` in the above formula. 
""" y = self.endog return np.exp(stats.poisson.logpmf(y, np.exp(X))) def loglike(self, params): """ Loglikelihood of Poisson model Parameters ---------- params : array-like The parameters of the model. Returns ------- loglike : float The log-likelihood function of the model evaluated at `params`. See notes. Notes -------- .. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right] """ offset = getattr(self, "offset", 0) exposure = getattr(self, "exposure", 0) XB = np.dot(self.exog, params) + offset + exposure endog = self.endog return np.sum(-np.exp(XB) + endog*XB - gammaln(endog+1)) def loglikeobs(self, params): """ Loglikelihood for observations of Poisson model Parameters ---------- params : array-like The parameters of the model. Returns ------- loglike : ndarray (nobs,) The log likelihood for each observation of the model evaluated at `params`. See Notes Notes -------- .. math:: \\ln L_{i}=\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right] for observations :math:`i=1,...,n` """ offset = getattr(self, "offset", 0) exposure = getattr(self, "exposure", 0) XB = np.dot(self.exog, params) + offset + exposure endog = self.endog #np.sum(stats.poisson.logpmf(endog, np.exp(XB))) return -np.exp(XB) + endog*XB - gammaln(endog+1) def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): cntfit = super(CountModel, self).fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) if 'cov_type' in kwargs: cov_kwds = kwargs.get('cov_kwds', {}) kwds = {'cov_type':kwargs['cov_type'], 'cov_kwds':cov_kwds} else: kwds = {} discretefit = PoissonResults(self, cntfit, **kwds) return PoissonResultsWrapper(discretefit) fit.__doc__ = DiscreteModel.fit.__doc__ def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=1, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, **kwargs): cntfit = super(CountModel, self).fit_regularized( start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs) if method in ['l1', 'l1_cvxopt_cp']: discretefit = L1PoissonResults(self, cntfit) else: raise Exception( "argument method == %s, which is not handled" % method) return L1PoissonResultsWrapper(discretefit) fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__ def fit_constrained(self, constraints, start_params=None, **fit_kwds): """fit the model subject to linear equality constraints The constraints are of the form `R params = q` where R is the constraint_matrix and q is the vector of constraint_values. The estimation creates a new model with transformed design matrix, exog, and converts the results back to the original parameterization. Parameters ---------- constraints : formula expression or tuple If it is a tuple, then the constraint needs to be given by two arrays (constraint_matrix, constraint_value), i.e. (R, q). Otherwise, the constraints can be given as strings or list of strings. see t_test for details start_params : None or array_like starting values for the optimization. `start_params` needs to be given in the original parameter space and are internally transformed. **fit_kwds : keyword arguments fit_kwds are used in the optimization of the transformed model. 
Returns ------- results : Results instance """ #constraints = (R, q) # TODO: temporary trailing underscore to not overwrite the monkey # patched version # TODO: decide whether to move the imports from patsy import DesignInfo from statsmodels.base._constraints import fit_constrained # same pattern as in base.LikelihoodModel.t_test lc = DesignInfo(self.exog_names).linear_constraint(constraints) R, q = lc.coefs, lc.constants # TODO: add start_params option, need access to tranformation # fit_constrained needs to do the transformation params, cov, res_constr = fit_constrained(self, R, q, start_params=start_params, fit_kwds=fit_kwds) #create dummy results Instance, TODO: wire up properly res = self.fit(maxiter=0, method='nm', disp=0, warn_convergence=False) # we get a wrapper back res.mle_retvals['fcall'] = res_constr.mle_retvals.get('fcall', np.nan) res.mle_retvals['iterations'] = res_constr.mle_retvals.get( 'iterations', np.nan) res.mle_retvals['converged'] = res_constr.mle_retvals['converged'] res._results.params = params res._results.normalized_cov_params = cov k_constr = len(q) res._results.df_resid += k_constr res._results.df_model -= k_constr res._results.constraints = lc res._results.k_constr = k_constr res._results.results_constrained = res_constr return res def score(self, params): """ Poisson model score (gradient) vector of the log-likelihood Parameters ---------- params : array-like The parameters of the model Returns ------- score : ndarray, 1-D The score vector of the model, i.e. the first derivative of the loglikelihood function, evaluated at `params` Notes ----- .. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\lambda_{i}\\right)x_{i} where the loglinear model is assumed .. math:: \\ln\\lambda_{i}=x_{i}\\beta """ offset = getattr(self, "offset", 0) exposure = getattr(self, "exposure", 0) X = self.exog L = np.exp(np.dot(X,params) + offset + exposure) return np.dot(self.endog - L, X) def score_obs(self, params): """ Poisson model Jacobian of the log-likelihood for each observation Parameters ---------- params : array-like The parameters of the model Returns ------- score : ndarray (nobs, k_vars) The score vector of the model evaluated at `params` Notes ----- .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\lambda_{i}\\right)x_{i} for observations :math:`i=1,...,n` where the loglinear model is assumed .. math:: \\ln\\lambda_{i}=x_{i}\\beta """ offset = getattr(self, "offset", 0) exposure = getattr(self, "exposure", 0) X = self.exog L = np.exp(np.dot(X,params) + offset + exposure) return (self.endog - L)[:,None] * X def hessian(self, params): """ Poisson model Hessian matrix of the loglikelihood Parameters ---------- params : array-like The parameters of the model Returns ------- hess : ndarray, (k_vars, k_vars) The Hessian, second derivative of loglikelihood function, evaluated at `params` Notes ----- .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i=1}^{n}\\lambda_{i}x_{i}x_{i}^{\\prime} where the loglinear model is assumed .. math:: \\ln\\lambda_{i}=x_{i}\\beta """ offset = getattr(self, "offset", 0) exposure = getattr(self, "exposure", 0) X = self.exog L = np.exp(np.dot(X,params) + exposure + offset) return -np.dot(L*X.T, X) class Logit(BinaryModel): __doc__ = """ Binary choice logit model %(params)s %(extra_params)s Attributes ----------- endog : array A reference to the endogenous response variable exog : array A reference to the exogenous design. 
""" % {'params' : base._model_params_doc, 'extra_params' : base._missing_param_doc} def cdf(self, X): """ The logistic cumulative distribution function Parameters ---------- X : array-like `X` is the linear predictor of the logit model. See notes. Returns ------- 1/(1 + exp(-X)) Notes ------ In the logit model, .. math:: \\Lambda\\left(x^{\\prime}\\beta\\right)=\\text{Prob}\\left(Y=1|x\\right)=\\frac{e^{x^{\\prime}\\beta}}{1+e^{x^{\\prime}\\beta}} """ X = np.asarray(X) return 1/(1+np.exp(-X)) def pdf(self, X): """ The logistic probability density function Parameters ----------- X : array-like `X` is the linear predictor of the logit model. See notes. Returns ------- pdf : ndarray The value of the Logit probability mass function, PMF, for each point of X. ``np.exp(-x)/(1+np.exp(-X))**2`` Notes ----- In the logit model, .. math:: \\lambda\\left(x^{\\prime}\\beta\\right)=\\frac{e^{-x^{\\prime}\\beta}}{\\left(1+e^{-x^{\\prime}\\beta}\\right)^{2}} """ X = np.asarray(X) return np.exp(-X)/(1+np.exp(-X))**2 def loglike(self, params): """ Log-likelihood of logit model. Parameters ----------- params : array-like The parameters of the logit model. Returns ------- loglike : float The log-likelihood function of the model evaluated at `params`. See notes. Notes ------ .. math:: \\ln L=\\sum_{i}\\ln\\Lambda\\left(q_{i}x_{i}^{\\prime}\\beta\\right) Where :math:`q=2y-1`. This simplification comes from the fact that the logistic distribution is symmetric. """ q = 2*self.endog - 1 X = self.exog return np.sum(np.log(self.cdf(q*np.dot(X,params)))) def loglikeobs(self, params): """ Log-likelihood of logit model for each observation. Parameters ----------- params : array-like The parameters of the logit model. Returns ------- loglike : ndarray (nobs,) The log likelihood for each observation of the model evaluated at `params`. See Notes Notes ------ .. math:: \\ln L=\\sum_{i}\\ln\\Lambda\\left(q_{i}x_{i}^{\\prime}\\beta\\right) for observations :math:`i=1,...,n` where :math:`q=2y-1`. This simplification comes from the fact that the logistic distribution is symmetric. """ q = 2*self.endog - 1 X = self.exog return np.log(self.cdf(q*np.dot(X,params))) def score(self, params): """ Logit model score (gradient) vector of the log-likelihood Parameters ---------- params: array-like The parameters of the model Returns ------- score : ndarray, 1-D The score vector of the model, i.e. the first derivative of the loglikelihood function, evaluated at `params` Notes ----- .. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\Lambda_{i}\\right)x_{i} """ y = self.endog X = self.exog L = self.cdf(np.dot(X,params)) return np.dot(y - L,X) def score_obs(self, params): """ Logit model Jacobian of the log-likelihood for each observation Parameters ---------- params: array-like The parameters of the model Returns ------- jac : ndarray, (nobs, k_vars) The derivative of the loglikelihood for each observation evaluated at `params`. Notes ----- .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\Lambda_{i}\\right)x_{i} for observations :math:`i=1,...,n` """ y = self.endog X = self.exog L = self.cdf(np.dot(X, params)) return (y - L)[:,None] * X def hessian(self, params): """ Logit model Hessian matrix of the log-likelihood Parameters ---------- params : array-like The parameters of the model Returns ------- hess : ndarray, (k_vars, k_vars) The Hessian, second derivative of loglikelihood function, evaluated at `params` Notes ----- .. 
math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i}\\Lambda_{i}\\left(1-\\Lambda_{i}\\right)x_{i}x_{i}^{\\prime} """ X = self.exog L = self.cdf(np.dot(X,params)) return -np.dot(L*(1-L)*X.T,X) def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): bnryfit = super(Logit, self).fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) discretefit = LogitResults(self, bnryfit) return BinaryResultsWrapper(discretefit) fit.__doc__ = DiscreteModel.fit.__doc__ class Probit(BinaryModel): __doc__ = """ Binary choice Probit model %(params)s %(extra_params)s Attributes ----------- endog : array A reference to the endogenous response variable exog : array A reference to the exogenous design. """ % {'params' : base._model_params_doc, 'extra_params' : base._missing_param_doc} def cdf(self, X): """ Probit (Normal) cumulative distribution function Parameters ---------- X : array-like The linear predictor of the model (XB). Returns -------- cdf : ndarray The cdf evaluated at `X`. Notes ----- This function is just an alias for scipy.stats.norm.cdf """ return stats.norm._cdf(X) def pdf(self, X): """ Probit (Normal) probability density function Parameters ---------- X : array-like The linear predictor of the model (XB). Returns -------- pdf : ndarray The value of the normal density function for each point of X. Notes ----- This function is just an alias for scipy.stats.norm.pdf """ X = np.asarray(X) return stats.norm._pdf(X) def loglike(self, params): """ Log-likelihood of probit model (i.e., the normal distribution). Parameters ---------- params : array-like The parameters of the model. Returns ------- loglike : float The log-likelihood function of the model evaluated at `params`. See notes. Notes ----- .. math:: \\ln L=\\sum_{i}\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right) Where :math:`q=2y-1`. This simplification comes from the fact that the normal distribution is symmetric. """ q = 2*self.endog - 1 X = self.exog return np.sum(np.log(np.clip(self.cdf(q*np.dot(X,params)), FLOAT_EPS, 1))) def loglikeobs(self, params): """ Log-likelihood of probit model for each observation Parameters ---------- params : array-like The parameters of the model. Returns ------- loglike : ndarray (nobs,) The log likelihood for each observation of the model evaluated at `params`. See Notes Notes ----- .. math:: \\ln L_{i}=\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right) for observations :math:`i=1,...,n` where :math:`q=2y-1`. This simplification comes from the fact that the normal distribution is symmetric. """ q = 2*self.endog - 1 X = self.exog return np.log(np.clip(self.cdf(q*np.dot(X,params)), FLOAT_EPS, 1)) def score(self, params): """ Probit model score (gradient) vector Parameters ---------- params : array-like The parameters of the model Returns ------- score : ndarray, 1-D The score vector of the model, i.e. the first derivative of the loglikelihood function, evaluated at `params` Notes ----- .. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i} Where :math:`q=2y-1`. This simplification comes from the fact that the normal distribution is symmetric. 
""" y = self.endog X = self.exog XB = np.dot(X,params) q = 2*y - 1 # clip to get rid of invalid divide complaint L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS) return np.dot(L,X) def score_obs(self, params): """ Probit model Jacobian for each observation Parameters ---------- params : array-like The parameters of the model Returns ------- jac : ndarray, (nobs, k_vars) The derivative of the loglikelihood for each observation evaluated at `params`. Notes ----- .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i} for observations :math:`i=1,...,n` Where :math:`q=2y-1`. This simplification comes from the fact that the normal distribution is symmetric. """ y = self.endog X = self.exog XB = np.dot(X,params) q = 2*y - 1 # clip to get rid of invalid divide complaint L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS) return L[:,None] * X def hessian(self, params): """ Probit model Hessian matrix of the log-likelihood Parameters ---------- params : array-like The parameters of the model Returns ------- hess : ndarray, (k_vars, k_vars) The Hessian, second derivative of loglikelihood function, evaluated at `params` Notes ----- .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\lambda_{i}\\left(\\lambda_{i}+x_{i}^{\\prime}\\beta\\right)x_{i}x_{i}^{\\prime} where .. math:: \\lambda_{i}=\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)} and :math:`q=2y-1` """ X = self.exog XB = np.dot(X,params) q = 2*self.endog - 1 L = q*self.pdf(q*XB)/self.cdf(q*XB) return np.dot(-L*(L+XB)*X.T,X) def fit(self, start_params=None, method='newton', maxiter=35, full_output=1, disp=1, callback=None, **kwargs): bnryfit = super(Probit, self).fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs) discretefit = ProbitResults(self, bnryfit) return BinaryResultsWrapper(discretefit) fit.__doc__ = DiscreteModel.fit.__doc__ class MNLogit(MultinomialModel): __doc__ = """ Multinomial logit model Parameters ---------- endog : array-like `endog` is an 1-d vector of the endogenous response. `endog` can contain strings, ints, or floats. Note that if it contains strings, every distinct string will be a category. No stripping of whitespace is done. exog : array-like A nobs x k array where `nobs` is the number of observations and `k` is the number of regressors. An intercept is not included by default and should be added by the user. See `statsmodels.tools.add_constant`. %(extra_params)s Attributes ---------- endog : array A reference to the endogenous response variable exog : array A reference to the exogenous design. J : float The number of choices for the endogenous variable. Note that this is zero-indexed. K : float The actual number of parameters for the exogenous design. Includes the constant if the design has one. names : dict A dictionary mapping the column number in `wendog` to the variables in `endog`. wendog : array An n x j array where j is the number of unique categories in `endog`. Each column of j is a dummy variable indicating the category of each observation. See `names` for a dictionary mapping each column to its category. Notes ----- See developer notes for further information on `MNLogit` internals. 
""" % {'extra_params' : base._missing_param_doc} def pdf(self, eXB): """ NotImplemented """ raise NotImplementedError def cdf(self, X): """ Multinomial logit cumulative distribution function. Parameters ---------- X : array The linear predictor of the model XB. Returns -------- cdf : ndarray The cdf evaluated at `X`. Notes ----- In the multinomial logit model. .. math:: \\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)} """ eXB = np.column_stack((np.ones(len(X)), np.exp(X))) return eXB/eXB.sum(1)[:,None] def loglike(self, params): """ Log-likelihood of the multinomial logit model. Parameters ---------- params : array-like The parameters of the multinomial logit model. Returns ------- loglike : float The log-likelihood function of the model evaluated at `params`. See notes. Notes ------ .. math:: \\ln L=\\sum_{i=1}^{n}\\sum_{j=0}^{J}d_{ij}\\ln\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right) where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0 if not. """ params = params.reshape(self.K, -1, order='F') d = self.wendog logprob = np.log(self.cdf(np.dot(self.exog,params))) return np.sum(d * logprob) def loglikeobs(self, params): """ Log-likelihood of the multinomial logit model for each observation. Parameters ---------- params : array-like The parameters of the multinomial logit model. Returns ------- loglike : ndarray (nobs,) The log likelihood for each observation of the model evaluated at `params`. See Notes Notes ------ .. math:: \\ln L_{i}=\\sum_{j=0}^{J}d_{ij}\\ln\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right) for observations :math:`i=1,...,n` where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0 if not. """ params = params.reshape(self.K, -1, order='F') d = self.wendog logprob = np.log(self.cdf(np.dot(self.exog,params))) return d * logprob def score(self, params): """ Score matrix for multinomial logit model log-likelihood Parameters ---------- params : array The parameters of the multinomial logit model. Returns -------- score : ndarray, (K * (J-1),) The 2-d score vector, i.e. the first derivative of the loglikelihood function, of the multinomial logit model evaluated at `params`. Notes ----- .. math:: \\frac{\\partial\\ln L}{\\partial\\beta_{j}}=\\sum_{i}\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i} for :math:`j=1,...,J` In the multinomial model the score matrix is K x J-1 but is returned as a flattened array to work with the solvers. """ params = params.reshape(self.K, -1, order='F') firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog, params))[:,1:] #NOTE: might need to switch terms if params is reshaped return np.dot(firstterm.T, self.exog).flatten() def loglike_and_score(self, params): """ Returns log likelihood and score, efficiently reusing calculations. Note that both of these returned quantities will need to be negated before being minimized by the maximum likelihood fitting machinery. 
""" params = params.reshape(self.K, -1, order='F') cdf_dot_exog_params = self.cdf(np.dot(self.exog, params)) loglike_value = np.sum(self.wendog * np.log(cdf_dot_exog_params)) firstterm = self.wendog[:, 1:] - cdf_dot_exog_params[:, 1:] score_array = np.dot(firstterm.T, self.exog).flatten() return loglike_value, score_array def score_obs(self, params): """ Jacobian matrix for multinomial logit model log-likelihood Parameters ---------- params : array The parameters of the multinomial logit model. Returns -------- jac : ndarray, (nobs, k_vars*(J-1)) The derivative of the loglikelihood for each observation evaluated at `params` . Notes ----- .. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta_{j}}=\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i} for :math:`j=1,...,J`, for observations :math:`i=1,...,n` In the multinomial model the score vector is K x (J-1) but is returned as a flattened array. The Jacobian has the observations in rows and the flatteded array of derivatives in columns. """ params = params.reshape(self.K, -1, order='F') firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog, params))[:,1:] #NOTE: might need to switch terms if params is reshaped return (firstterm[:,:,None] * self.exog[:,None,:]).reshape(self.exog.shape[0], -1) def hessian(self, params): """ Multinomial logit Hessian matrix of the log-likelihood Parameters ----------- params : array-like The parameters of the model Returns ------- hess : ndarray, (J*K, J*K) The Hessian, second derivative of loglikelihood function with respect to the flattened parameters, evaluated at `params` Notes ----- .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta_{j}\\partial\\beta_{l}}=-\\sum_{i=1}^{n}\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\left[\\boldsymbol{1}\\left(j=l\\right)-\\frac{\\exp\\left(\\beta_{l}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right]x_{i}x_{l}^{\\prime} where :math:`\\boldsymbol{1}\\left(j=l\\right)` equals 1 if `j` = `l` and 0 otherwise. The actual Hessian matrix has J**2 * K x K elements. Our Hessian is reshaped to be square (J*K, J*K) so that the solvers can use it. This implementation does not take advantage of the symmetry of the Hessian and could probably be refactored for speed. """ params = params.reshape(self.K, -1, order='F') X = self.exog pr = self.cdf(np.dot(X,params)) partials = [] J = self.wendog.shape[1] - 1 K = self.exog.shape[1] for i in range(J): for j in range(J): # this loop assumes we drop the first col. if i == j: partials.append(\ -np.dot(((pr[:,i+1]*(1-pr[:,j+1]))[:,None]*X).T,X)) else: partials.append(-np.dot(((pr[:,i+1]*-pr[:,j+1])[:,None]*X).T,X)) H = np.array(partials) # the developer's notes on multinomial should clear this math up H = np.transpose(H.reshape(J,J,K,K), (0,2,1,3)).reshape(J*K,J*K) return H #TODO: Weibull can replaced by a survival analsysis function # like stat's streg (The cox model as well) #class Weibull(DiscreteModel): # """ # Binary choice Weibull model # # Notes # ------ # This is unfinished and untested. # """ ##TODO: add analytic hessian for Weibull # def initialize(self): # pass # # def cdf(self, X): # """ # Gumbell (Log Weibull) cumulative distribution function # """ ## return np.exp(-np.exp(-X)) # return stats.gumbel_r.cdf(X) # # these two are equivalent. # # Greene table and discussion is incorrect. 
# # def pdf(self, X): # """ # Gumbell (LogWeibull) probability distribution function # """ # return stats.gumbel_r.pdf(X) # # def loglike(self, params): # """ # Loglikelihood of Weibull distribution # """ # X = self.exog # cdf = self.cdf(np.dot(X,params)) # y = self.endog # return np.sum(y*np.log(cdf) + (1-y)*np.log(1-cdf)) # # def score(self, params): # y = self.endog # X = self.exog # F = self.cdf(np.dot(X,params)) # f = self.pdf(np.dot(X,params)) # term = (y*f/F + (1 - y)*-f/(1-F)) # return np.dot(term,X) # # def hessian(self, params): # hess = nd.Jacobian(self.score) # return hess(params) # # def fit(self, start_params=None, method='newton', maxiter=35, tol=1e-08): ## The example had problems with all zero start values, Hessian = 0 # if start_params is None: # start_params = OLS(self.endog, self.exog).fit().params # mlefit = super(Weibull, self).fit(start_params=start_params, # method=method, maxiter=maxiter, tol=tol) # return mlefit # class NegativeBinomial(CountModel): __doc__ = """ Negative Binomial Model for count data %(params)s %(extra_params)s Attributes ----------- endog : array A reference to the endogenous response variable exog : array A reference to the exogenous design. References ---------- References: Greene, W. 2008. "Functional forms for the negtive binomial model for count data". Economics Letters. Volume 99, Number 3, pp.585-590. Hilbe, J.M. 2011. "Negative binomial regression". Cambridge University Press. """ % {'params' : base._model_params_doc, 'extra_params' : """loglike_method : string Log-likelihood type. 'nb2','nb1', or 'geometric'. Fitted value :math:`\\mu` Heterogeneity parameter :math:`\\alpha` - nb2: Variance equal to :math:`\\mu + \\alpha\\mu^2` (most common) - nb1: Variance equal to :math:`\\mu + \\alpha\\mu` - geometric: Variance equal to :math:`\\mu + \\mu^2` offset : array_like Offset is added to the linear prediction with coefficient equal to 1. exposure : array_like Log(exposure) is added to the linear prediction with coefficient equal to 1. 
""" + base._missing_param_doc} def __init__(self, endog, exog, loglike_method='nb2', offset=None, exposure=None, missing='none', **kwargs): super(NegativeBinomial, self).__init__(endog, exog, offset=offset, exposure=exposure, missing=missing, **kwargs) self.loglike_method = loglike_method self._initialize() if loglike_method in ['nb2', 'nb1']: self.exog_names.append('alpha') self.k_extra = 1 else: self.k_extra = 0 # store keys for extras if we need to recreate model instance # we need to append keys that don't go to super self._init_keys.append('loglike_method') def _initialize(self): if self.loglike_method == 'nb2': self.hessian = self._hessian_nb2 self.score = self._score_nbin self.loglikeobs = self._ll_nb2 self._transparams = True # transform lnalpha -> alpha in fit elif self.loglike_method == 'nb1': self.hessian = self._hessian_nb1 self.score = self._score_nb1 self.loglikeobs = self._ll_nb1 self._transparams = True # transform lnalpha -> alpha in fit elif self.loglike_method == 'geometric': self.hessian = self._hessian_geom self.score = self._score_geom self.loglikeobs = self._ll_geometric else: raise NotImplementedError("Likelihood type must nb1, nb2 or " "geometric") # Workaround to pickle instance methods def __getstate__(self): odict = self.__dict__.copy() # copy the dict since we change it del odict['hessian'] del odict['score'] del odict['loglikeobs'] return odict def __setstate__(self, indict): self.__dict__.update(indict) self._initialize() def _ll_nbin(self, params, alpha, Q=0): endog = self.endog mu = self.predict(params) size = 1/alpha * mu**Q prob = size/(size+mu) coeff = (gammaln(size+endog) - gammaln(endog+1) - gammaln(size)) llf = coeff + size*np.log(prob) + endog*np.log(1-prob) return llf def _ll_nb2(self, params): if self._transparams: # got lnalpha during fit alpha = np.exp(params[-1]) else: alpha = params[-1] return self._ll_nbin(params[:-1], alpha, Q=0) def _ll_nb1(self, params): if self._transparams: # got lnalpha during fit alpha = np.exp(params[-1]) else: alpha = params[-1] return self._ll_nbin(params[:-1], alpha, Q=1) def _ll_geometric(self, params): # we give alpha of 1 because it's actually log(alpha) where alpha=0 return self._ll_nbin(params, 1, 0) def loglike(self, params): r""" Loglikelihood for negative binomial model Parameters ---------- params : array-like The parameters of the model. If `loglike_method` is nb1 or nb2, then the ancillary parameter is expected to be the last element. Returns ------- llf : float The loglikelihood value at `params` Notes ----- Following notation in Greene (2008), with negative binomial heterogeneity parameter :math:`\alpha`: .. math:: \lambda_i &= exp(X\beta) \\ \theta &= 1 / \alpha \\ g_i &= \theta \lambda_i^Q \\ w_i &= g_i/(g_i + \lambda_i) \\ r_i &= \theta / (\theta+\lambda_i) \\ ln \mathcal{L}_i &= ln \Gamma(y_i+g_i) - ln \Gamma(1+y_i) + g_iln (r_i) + y_i ln(1-r_i) where :math`Q=0` for NB2 and geometric and :math:`Q=1` for NB1. For the geometric, :math:`\alpha=0` as well. 
""" llf = np.sum(self.loglikeobs(params)) return llf def _score_geom(self, params): exog = self.exog y = self.endog[:,None] mu = self.predict(params)[:,None] dparams = exog * (y-mu)/(mu+1) return dparams.sum(0) def _score_nbin(self, params, Q=0): """ Score vector for NB2 model """ if self._transparams: # lnalpha came in during fit alpha = np.exp(params[-1]) else: alpha = params[-1] params = params[:-1] exog = self.exog y = self.endog[:,None] mu = self.predict(params)[:,None] a1 = 1/alpha * mu**Q if Q: # nb1 dparams = exog*mu/alpha*(np.log(1/(alpha + 1)) + special.digamma(y + mu/alpha) - special.digamma(mu/alpha)) dalpha = ((alpha*(y - mu*np.log(1/(alpha + 1)) - mu*(special.digamma(y + mu/alpha) - special.digamma(mu/alpha) + 1)) - mu*(np.log(1/(alpha + 1)) + special.digamma(y + mu/alpha) - special.digamma(mu/alpha)))/ (alpha**2*(alpha + 1))).sum() else: # nb2 dparams = exog*a1 * (y-mu)/(mu+a1) da1 = -alpha**-2 dalpha = (special.digamma(a1+y) - special.digamma(a1) + np.log(a1) - np.log(a1+mu) - (a1+y)/(a1+mu) + 1).sum()*da1 #multiply above by constant outside sum to reduce rounding error if self._transparams: return np.r_[dparams.sum(0), dalpha*alpha] else: return np.r_[dparams.sum(0), dalpha] def _score_nb1(self, params): return self._score_nbin(params, Q=1) def _hessian_geom(self, params): exog = self.exog y = self.endog[:,None] mu = self.predict(params)[:,None] # for dl/dparams dparams dim = exog.shape[1] hess_arr = np.empty((dim, dim)) const_arr = mu*(1+y)/(mu+1)**2 for i in range(dim): for j in range(dim): if j > i: continue hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] * const_arr, axis=0) tri_idx = np.triu_indices(dim, k=1) hess_arr[tri_idx] = hess_arr.T[tri_idx] return hess_arr def _hessian_nb1(self, params): """ Hessian of NB1 model. """ if self._transparams: # lnalpha came in during fit alpha = np.exp(params[-1]) else: alpha = params[-1] params = params[:-1] exog = self.exog y = self.endog[:,None] mu = self.predict(params)[:,None] a1 = mu/alpha # for dl/dparams dparams dim = exog.shape[1] hess_arr = np.empty((dim+1,dim+1)) #const_arr = a1*mu*(a1+y)/(mu+a1)**2 # not all of dparams dparams = exog/alpha*(np.log(1/(alpha + 1)) + special.digamma(y + mu/alpha) - special.digamma(mu/alpha)) dmudb = exog*mu xmu_alpha = exog*mu/alpha trigamma = (special.polygamma(1, mu/alpha + y) - special.polygamma(1, mu/alpha)) for i in range(dim): for j in range(dim): if j > i: continue hess_arr[i,j] = np.sum(dparams[:,i,None] * dmudb[:,j,None] + xmu_alpha[:,i,None] * xmu_alpha[:,j,None] * trigamma, axis=0) tri_idx = np.triu_indices(dim, k=1) hess_arr[tri_idx] = hess_arr.T[tri_idx] # for dl/dparams dalpha da1 = -alpha**-2 dldpda = np.sum(-mu/alpha * dparams + exog*mu/alpha * (-trigamma*mu/alpha**2 - 1/(alpha+1)), axis=0) hess_arr[-1,:-1] = dldpda hess_arr[:-1,-1] = dldpda # for dl/dalpha dalpha digamma_part = (special.digamma(y + mu/alpha) - special.digamma(mu/alpha)) log_alpha = np.log(1/(alpha+1)) alpha3 = alpha**3 alpha2 = alpha**2 mu2 = mu**2 dada = ((alpha3*mu*(2*log_alpha + 2*digamma_part + 3) - 2*alpha3*y + alpha2*mu2*trigamma + 4*alpha2*mu*(log_alpha + digamma_part) + alpha2 * (2*mu - y) + 2*alpha*mu2*trigamma + 2*alpha*mu*(log_alpha + digamma_part) + mu2*trigamma)/(alpha**4*(alpha2 + 2*alpha + 1))) hess_arr[-1,-1] = dada.sum() return hess_arr def _hessian_nb2(self, params): """ Hessian of NB2 model. 
""" if self._transparams: # lnalpha came in during fit alpha = np.exp(params[-1]) else: alpha = params[-1] a1 = 1/alpha params = params[:-1] exog = self.exog y = self.endog[:,None] mu = self.predict(params)[:,None] # for dl/dparams dparams dim = exog.shape[1] hess_arr = np.empty((dim+1,dim+1)) const_arr = a1*mu*(a1+y)/(mu+a1)**2 for i in range(dim): for j in range(dim): if j > i: continue hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] * const_arr, axis=0) tri_idx = np.triu_indices(dim, k=1) hess_arr[tri_idx] = hess_arr.T[tri_idx] # for dl/dparams dalpha da1 = -alpha**-2 dldpda = np.sum(mu*exog*(y-mu)*da1/(mu+a1)**2 , axis=0) hess_arr[-1,:-1] = dldpda hess_arr[:-1,-1] = dldpda # for dl/dalpha dalpha #NOTE: polygamma(1,x) is the trigamma function da2 = 2*alpha**-3 dalpha = da1 * (special.digamma(a1+y) - special.digamma(a1) + np.log(a1) - np.log(a1+mu) - (a1+y)/(a1+mu) + 1) dada = (da2 * dalpha/da1 + da1**2 * (special.polygamma(1, a1+y) - special.polygamma(1, a1) + 1/a1 - 1/(a1 + mu) + (y - mu)/(mu + a1)**2)).sum() hess_arr[-1,-1] = dada return hess_arr #TODO: replace this with analytic where is it used? def score_obs(self, params): sc = approx_fprime_cs(params, self.loglikeobs) return sc def fit(self, start_params=None, method='bfgs', maxiter=35, full_output=1, disp=1, callback=None, cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs): # Note: don't let super handle robust covariance because it has # transformed params if self.loglike_method.startswith('nb') and method not in ['newton', 'ncg']: self._transparams = True # in case same Model instance is refit elif self.loglike_method.startswith('nb'): # method is newton/ncg self._transparams = False # because we need to step in alpha space if start_params is None: # Use poisson fit as first guess. #TODO, Warning: this assumes exposure is logged offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0) if np.size(offset) == 1 and offset == 0: offset = None mod_poi = Poisson(self.endog, self.exog, offset=offset) start_params = mod_poi.fit(disp=0).params if self.loglike_method.startswith('nb'): start_params = np.append(start_params, 0.1) mlefit = super(NegativeBinomial, self).fit(start_params=start_params, maxiter=maxiter, method=method, disp=disp, full_output=full_output, callback=lambda x:x, **kwargs) # TODO: Fix NBin _check_perfect_pred if self.loglike_method.startswith('nb'): # mlefit is a wrapped counts results self._transparams = False # don't need to transform anymore now # change from lnalpha to alpha if method not in ["newton", "ncg"]: mlefit._results.params[-1] = np.exp(mlefit._results.params[-1]) nbinfit = NegativeBinomialResults(self, mlefit._results) result = NegativeBinomialResultsWrapper(nbinfit) else: result = mlefit if cov_kwds is None: cov_kwds = {} #TODO: make this unnecessary ? 
result._get_robustcov_results(cov_type=cov_type, use_self=True, use_t=use_t, **cov_kwds) return result def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=1, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, **kwargs): if self.loglike_method.startswith('nb') and (np.size(alpha) == 1 and alpha != 0): # don't penalize alpha if alpha is scalar k_params = self.exog.shape[1] + self.k_extra alpha = alpha * np.ones(k_params) alpha[-1] = 0 # alpha for regularized poisson to get starting values alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha self._transparams = False if start_params is None: # Use poisson fit as first guess. #TODO, Warning: this assumes exposure is logged offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0) if np.size(offset) == 1 and offset == 0: offset = None mod_poi = Poisson(self.endog, self.exog, offset=offset) start_params = mod_poi.fit_regularized( start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=0, callback=callback, alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params if self.loglike_method.startswith('nb'): start_params = np.append(start_params, 0.1) cntfit = super(CountModel, self).fit_regularized( start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol, size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs) if method in ['l1', 'l1_cvxopt_cp']: discretefit = L1NegativeBinomialResults(self, cntfit) else: raise Exception( "argument method == %s, which is not handled" % method) return L1NegativeBinomialResultsWrapper(discretefit) ### Results Class ### class DiscreteResults(base.LikelihoodModelResults): __doc__ = _discrete_results_docs % {"one_line_description" : "A results class for the discrete dependent variable models.", "extra_attr" : ""} def __init__(self, model, mlefit, cov_type='nonrobust', cov_kwds=None, use_t=None): #super(DiscreteResults, self).__init__(model, params, # np.linalg.inv(-hessian), scale=1.) self.model = model self.df_model = model.df_model self.df_resid = model.df_resid self._cache = resettable_cache() self.nobs = model.exog.shape[0] self.__dict__.update(mlefit.__dict__) if not hasattr(self, 'cov_type'): # do this only if super, i.e. mlefit didn't already add cov_type # robust covariance if use_t is not None: self.use_t = use_t if cov_type == 'nonrobust': self.cov_type = 'nonrobust' self.cov_kwds = {'description' : 'Standard Errors assume that the ' + 'covariance matrix of the errors is correctly ' + 'specified.'} else: if cov_kwds is None: cov_kwds = {} from statsmodels.base.covtype import get_robustcov_results get_robustcov_results(self, cov_type=cov_type, use_self=True, **cov_kwds) def __getstate__(self): try: #remove unpicklable callback self.mle_settings['callback'] = None except (AttributeError, KeyError): pass return self.__dict__ @cache_readonly def prsquared(self): return 1 - self.llf/self.llnull @cache_readonly def llr(self): return -2*(self.llnull - self.llf) @cache_readonly def llr_pvalue(self): return stats.chisqprob(self.llr, self.df_model) @cache_readonly def llnull(self): model = self.model kwds = model._get_init_kwds() # TODO: what parameters to pass to fit? 
mod_null = model.__class__(model.endog, np.ones(self.nobs), **kwds) # TODO: consider catching and warning on convergence failure? # in the meantime, try hard to converge. see # TestPoissonConstrained1a.test_smoke res_null = mod_null.fit(disp=0, warn_convergence=False, maxiter=10000) return res_null.llf @cache_readonly def fittedvalues(self): return np.dot(self.model.exog, self.params[:self.model.exog.shape[1]]) @cache_readonly def aic(self): return -2*(self.llf - (self.df_model+1)) @cache_readonly def bic(self): return -2*self.llf + np.log(self.nobs)*(self.df_model+1) def _get_endog_name(self, yname, yname_list): if yname is None: yname = self.model.endog_names if yname_list is None: yname_list = self.model.endog_names return yname, yname_list def get_margeff(self, at='overall', method='dydx', atexog=None, dummy=False, count=False): """Get marginal effects of the fitted model. Parameters ---------- at : str, optional Options are: - 'overall', The average of the marginal effects at each observation. - 'mean', The marginal effects at the mean of each regressor. - 'median', The marginal effects at the median of each regressor. - 'zero', The marginal effects at zero for each regressor. - 'all', The marginal effects at each observation. If `at` is all only margeff will be available from the returned object. Note that if `exog` is specified, then marginal effects for all variables not specified by `exog` are calculated using the `at` option. method : str, optional Options are: - 'dydx' - dy/dx - No transformation is made and marginal effects are returned. This is the default. - 'eyex' - estimate elasticities of variables in `exog` -- d(lny)/d(lnx) - 'dyex' - estimate semielasticity -- dy/d(lnx) - 'eydx' - estimate semeilasticity -- d(lny)/dx Note that tranformations are done after each observation is calculated. Semi-elasticities for binary variables are computed using the midpoint method. 'dyex' and 'eyex' do not make sense for discrete variables. atexog : array-like, optional Optionally, you can provide the exogenous variables over which to get the marginal effects. This should be a dictionary with the key as the zero-indexed column number and the value of the dictionary. Default is None for all independent variables less the constant. dummy : bool, optional If False, treats binary variables (if present) as continuous. This is the default. Else if True, treats binary variables as changing from 0 to 1. Note that any variable that is either 0 or 1 is treated as binary. Each binary variable is treated separately for now. count : bool, optional If False, treats count variables (if present) as continuous. This is the default. Else if True, the marginal effect is the change in probabilities when each observation is increased by one. Returns ------- DiscreteMargins : marginal effects instance Returns an object that holds the marginal effects, standard errors, confidence intervals, etc. See `statsmodels.discrete.discrete_margins.DiscreteMargins` for more information. Notes ----- When using after Poisson, returns the expected number of events per period, assuming that the model is loglinear. 
""" from statsmodels.discrete.discrete_margins import DiscreteMargins return DiscreteMargins(self, (at, method, atexog, dummy, count)) def summary(self, yname=None, xname=None, title=None, alpha=.05, yname_list=None): """Summarize the Regression Results Parameters ----------- yname : string, optional Default is `y` xname : list of strings, optional Default is `var_##` for ## in p the number of regressors title : string, optional Title for the top table. If not None, then this replaces the default title alpha : float significance level for the confidence intervals Returns ------- smry : Summary instance this holds the summary tables and text, which can be printed or converted to various output formats. See Also -------- statsmodels.iolib.summary.Summary : class to hold summary results """ top_left = [('Dep. Variable:', None), ('Model:', [self.model.__class__.__name__]), ('Method:', ['MLE']), ('Date:', None), ('Time:', None), #('No. iterations:', ["%d" % self.mle_retvals['iterations']]), ('converged:', ["%s" % self.mle_retvals['converged']]) ] top_right = [('No. Observations:', None), ('Df Residuals:', None), ('Df Model:', None), ('Pseudo R-squ.:', ["%#6.4g" % self.prsquared]), ('Log-Likelihood:', None), ('LL-Null:', ["%#8.5g" % self.llnull]), ('LLR p-value:', ["%#6.4g" % self.llr_pvalue]) ] if title is None: title = self.model.__class__.__name__ + ' ' + "Regression Results" #boiler plate from statsmodels.iolib.summary import Summary smry = Summary() yname, yname_list = self._get_endog_name(yname, yname_list) # for top of table smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[], yname=yname, xname=xname, title=title) # for parameters, etc smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha, use_t=self.use_t) if hasattr(self, 'constraints'): smry.add_extra_txt(['Model has been estimated subject to linear ' 'equality constraints.']) #diagnostic table not used yet #smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right, # yname=yname, xname=xname, # title="") return smry def summary2(self, yname=None, xname=None, title=None, alpha=.05, float_format="%.4f"): """Experimental function to summarize regression results Parameters ----------- xname : List of strings of length equal to the number of parameters Names of the independent variables (optional) yname : string Name of the dependent variable (optional) title : string, optional Title for the top table. If not None, then this replaces the default title alpha : float significance level for the confidence intervals float_format: string print format for floats in parameters summary Returns ------- smry : Summary instance this holds the summary tables and text, which can be printed or converted to various output formats. See Also -------- statsmodels.iolib.summary.Summary : class to hold summary results """ # Summary from statsmodels.iolib import summary2 smry = summary2.Summary() smry.add_base(results=self, alpha=alpha, float_format=float_format, xname=xname, yname=yname, title=title) if hasattr(self, 'constraints'): smry.add_text('Model has been estimated subject to linear ' 'equality constraints.') return smry class CountResults(DiscreteResults): __doc__ = _discrete_results_docs % { "one_line_description" : "A results class for count data", "extra_attr" : ""} @cache_readonly def resid(self): """ Residuals Notes ----- The residuals for Count models are defined as .. math:: y - p where :math:`p = \\exp(X\\beta)`. Any exposure and offset variables are also handled. 
""" return self.model.endog - self.predict() class NegativeBinomialResults(CountResults): __doc__ = _discrete_results_docs % { "one_line_description" : "A results class for NegativeBinomial 1 and 2", "extra_attr" : ""} @cache_readonly def lnalpha(self): return np.log(self.params[-1]) @cache_readonly def lnalpha_std_err(self): return self.bse[-1] / self.params[-1] @cache_readonly def aic(self): # + 1 because we estimate alpha k_extra = getattr(self.model, 'k_extra', 0) return -2*(self.llf - (self.df_model + self.k_constant + k_extra)) @cache_readonly def bic(self): # + 1 because we estimate alpha k_extra = getattr(self.model, 'k_extra', 0) return -2*self.llf + np.log(self.nobs)*(self.df_model + self.k_constant + k_extra) class L1CountResults(DiscreteResults): __doc__ = _discrete_results_docs % {"one_line_description" : "A results class for count data fit by l1 regularization", "extra_attr" : _l1_results_attr} #discretefit = CountResults(self, cntfit) def __init__(self, model, cntfit): super(L1CountResults, self).__init__(model, cntfit) # self.trimmed is a boolean array with T/F telling whether or not that # entry in params has been set zero'd out. self.trimmed = cntfit.mle_retvals['trimmed'] self.nnz_params = (self.trimmed == False).sum() # update degrees of freedom self.model.df_model = self.nnz_params - 1 self.model.df_resid = float(self.model.endog.shape[0] - self.nnz_params) # adjust for extra parameter in NegativeBinomial nb1 and nb2 # extra parameter is not included in df_model k_extra = getattr(self.model, 'k_extra', 0) self.model.df_model -= k_extra self.model.df_resid += k_extra self.df_model = self.model.df_model self.df_resid = self.model.df_resid class PoissonResults(CountResults): def predict_prob(self, n=None, exog=None, exposure=None, offset=None, transform=True): """ Return predicted probability of each count level for each observation Parameters ---------- n : array-like or int The counts for which you want the probabilities. If n is None then the probabilities for each count from 0 to max(y) are given. Returns ------- ndarray A nobs x n array where len(`n`) columns are indexed by the count n. If n is None, then column 0 is the probability that each observation is 0, column 1 is the probability that each observation is 1, etc. """ if n is not None: counts = np.atleast_2d(n) else: counts = np.atleast_2d(np.arange(0, np.max(self.model.endog)+1)) mu = self.predict(exog=exog, exposure=exposure, offset=offset, transform=transform, linear=False)[:,None] # uses broadcasting return stats.poisson.pmf(counts, mu) class L1PoissonResults(L1CountResults, PoissonResults): pass class L1NegativeBinomialResults(L1CountResults, NegativeBinomialResults): pass class OrderedResults(DiscreteResults): __doc__ = _discrete_results_docs % {"one_line_description" : "A results class for ordered discrete data." , "extra_attr" : ""} pass class BinaryResults(DiscreteResults): __doc__ = _discrete_results_docs % {"one_line_description" : "A results class for binary data", "extra_attr" : ""} def pred_table(self, threshold=.5): """ Prediction table Parameters ---------- threshold : scalar Number between 0 and 1. Threshold above which a prediction is considered 1 and below which a prediction is considered 0. Notes ------ pred_table[i,j] refers to the number of times "i" was observed and the model predicted "j". Correct predictions are along the diagonal. 
""" model = self.model actual = model.endog pred = np.array(self.predict() > threshold, dtype=float) bins = np.array([0, 0.5, 1]) return np.histogram2d(actual, pred, bins=bins)[0] def summary(self, yname=None, xname=None, title=None, alpha=.05, yname_list=None): smry = super(BinaryResults, self).summary(yname, xname, title, alpha, yname_list) fittedvalues = self.model.cdf(self.fittedvalues) absprederror = np.abs(self.model.endog - fittedvalues) predclose_sum = (absprederror < 1e-4).sum() predclose_frac = predclose_sum / len(fittedvalues) #add warnings/notes etext = [] if predclose_sum == len(fittedvalues): #nobs? wstr = "Complete Separation: The results show that there is" wstr += "complete separation.\n" wstr += "In this case the Maximum Likelihood Estimator does " wstr += "not exist and the parameters\n" wstr += "are not identified." etext.append(wstr) elif predclose_frac > 0.1: # TODO: get better diagnosis wstr = "Possibly complete quasi-separation: A fraction " wstr += "%4.2f of observations can be\n" % predclose_frac wstr += "perfectly predicted. This might indicate that there " wstr += "is complete\nquasi-separation. In this case some " wstr += "parameters will not be identified." etext.append(wstr) if etext: smry.add_extra_txt(etext) return smry summary.__doc__ = DiscreteResults.summary.__doc__ @cache_readonly def resid_dev(self): """ Deviance residuals Notes ----- Deviance residuals are defined .. math:: d_j = \\pm\\left(2\\left[Y_j\\ln\\left(\\frac{Y_j}{M_jp_j}\\right) + (M_j - Y_j\\ln\\left(\\frac{M_j-Y_j}{M_j(1-p_j)} \\right) \\right] \\right)^{1/2} where :math:`p_j = cdf(X\\beta)` and :math:`M_j` is the total number of observations sharing the covariate pattern :math:`j`. For now :math:`M_j` is always set to 1. """ #These are the deviance residuals #model = self.model endog = self.model.endog #exog = model.exog # M = # of individuals that share a covariate pattern # so M[i] = 2 for i = two share a covariate pattern M = 1 p = self.predict() #Y_0 = np.where(exog == 0) #Y_M = np.where(exog == M) #NOTE: Common covariate patterns are not yet handled res = -(1-endog)*np.sqrt(2*M*np.abs(np.log(1-p))) + \ endog*np.sqrt(2*M*np.abs(np.log(p))) return res @cache_readonly def resid_pearson(self): """ Pearson residuals Notes ----- Pearson residuals are defined to be .. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}} where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of observations sharing the covariate pattern :math:`j`. For now :math:`M_j` is always set to 1. """ # Pearson residuals #model = self.model endog = self.model.endog #exog = model.exog # M = # of individuals that share a covariate pattern # so M[i] = 2 for i = two share a covariate pattern # use unique row pattern? M = 1 p = self.predict() return (endog - M*p)/np.sqrt(M*p*(1-p)) @cache_readonly def resid_response(self): """ The response residuals Notes ----- Response residuals are defined to be .. math:: y - p where :math:`p=cdf(X\\beta)`. """ return self.model.endog - self.predict() class LogitResults(BinaryResults): __doc__ = _discrete_results_docs % { "one_line_description" : "A results class for Logit Model", "extra_attr" : ""} @cache_readonly def resid_generalized(self): """ Generalized residuals Notes ----- The generalized residuals for the Logit model are defined .. math:: y - p where :math:`p=cdf(X\\beta)`. This is the same as the `resid_response` for the Logit model. 
""" # Generalized residuals return self.model.endog - self.predict() class ProbitResults(BinaryResults): __doc__ = _discrete_results_docs % { "one_line_description" : "A results class for Probit Model", "extra_attr" : ""} @cache_readonly def resid_generalized(self): """ Generalized residuals Notes ----- The generalized residuals for the Probit model are defined .. math:: y\\frac{\\phi(X\\beta)}{\\Phi(X\\beta)}-(1-y)\\frac{\\phi(X\\beta)}{1-\\Phi(X\\beta)} """ # generalized residuals model = self.model endog = model.endog XB = self.predict(linear=True) pdf = model.pdf(XB) cdf = model.cdf(XB) return endog * pdf/cdf - (1-endog)*pdf/(1-cdf) class L1BinaryResults(BinaryResults): __doc__ = _discrete_results_docs % {"one_line_description" : "Results instance for binary data fit by l1 regularization", "extra_attr" : _l1_results_attr} def __init__(self, model, bnryfit): super(L1BinaryResults, self).__init__(model, bnryfit) # self.trimmed is a boolean array with T/F telling whether or not that # entry in params has been set zero'd out. self.trimmed = bnryfit.mle_retvals['trimmed'] self.nnz_params = (self.trimmed == False).sum() self.model.df_model = self.nnz_params - 1 self.model.df_resid = float(self.model.endog.shape[0] - self.nnz_params) self.df_model = self.model.df_model self.df_resid = self.model.df_resid class MultinomialResults(DiscreteResults): __doc__ = _discrete_results_docs % {"one_line_description" : "A results class for multinomial data", "extra_attr" : ""} def _maybe_convert_ynames_int(self, ynames): # see if they're integers try: for i in ynames: if ynames[i] % 1 == 0: ynames[i] = str(int(ynames[i])) except TypeError: pass return ynames def _get_endog_name(self, yname, yname_list, all=False): """ If all is False, the first variable name is dropped """ model = self.model if yname is None: yname = model.endog_names if yname_list is None: ynames = model._ynames_map ynames = self._maybe_convert_ynames_int(ynames) # use range below to ensure sortedness ynames = [ynames[key] for key in range(int(model.J))] ynames = ['='.join([yname, name]) for name in ynames] if not all: yname_list = ynames[1:] # assumes first variable is dropped else: yname_list = ynames return yname, yname_list def pred_table(self): """ Returns the J x J prediction table. Notes ----- pred_table[i,j] refers to the number of times "i" was observed and the model predicted "j". Correct predictions are along the diagonal. """ ju = self.model.J - 1 # highest index # these are the actual, predicted indices #idx = lzip(self.model.endog, self.predict().argmax(1)) bins = np.concatenate(([0], np.linspace(0.5, ju - 0.5, ju), [ju])) return np.histogram2d(self.model.endog, self.predict().argmax(1), bins=bins)[0] @cache_readonly def bse(self): bse = np.sqrt(np.diag(self.cov_params())) return bse.reshape(self.params.shape, order='F') @cache_readonly def aic(self): return -2*(self.llf - (self.df_model+self.model.J-1)) @cache_readonly def bic(self): return -2*self.llf + np.log(self.nobs)*(self.df_model+self.model.J-1) def conf_int(self, alpha=.05, cols=None): confint = super(DiscreteResults, self).conf_int(alpha=alpha, cols=cols) return confint.transpose(2,0,1) def margeff(self): raise NotImplementedError("Use get_margeff instead") @cache_readonly def resid_misclassified(self): """ Residuals indicating which observations are misclassified. Notes ----- The residuals for the multinomial model are defined as .. 
math:: argmax(y_i) \\neq argmax(p_i) where :math:`argmax(y_i)` is the index of the category for the endogenous variable and :math:`argmax(p_i)` is the index of the predicted probabilities for each category. That is, the residual is a binary indicator that is 0 if the category with the highest predicted probability is the same as that of the observed variable and 1 otherwise. """ # it's 0 or 1 - 0 for correct prediction and 1 for a missed one return (self.model.wendog.argmax(1) != self.predict().argmax(1)).astype(float) def summary2(self, alpha=0.05, float_format="%.4f"): """Experimental function to summarize regression results Parameters ----------- alpha : float significance level for the confidence intervals float_format: string print format for floats in parameters summary Returns ------- smry : Summary instance this holds the summary tables and text, which can be printed or converted to various output formats. See Also -------- statsmodels.iolib.summary2.Summary : class to hold summary results """ from statsmodels.iolib import summary2 smry = summary2.Summary() smry.add_dict(summary2.summary_model(self)) # One data frame per value of endog eqn = self.params.shape[1] confint = self.conf_int(alpha) for i in range(eqn): coefs = summary2.summary_params(self, alpha, self.params[:,i], self.bse[:,i], self.tvalues[:,i], self.pvalues[:,i], confint[i]) # Header must show value of endog level_str = self.model.endog_names + ' = ' + str(i) coefs[level_str] = coefs.index coefs = coefs.ix[:,[-1,0,1,2,3,4,5]] smry.add_df(coefs, index=False, header=True, float_format=float_format) smry.add_title(results=self) return smry class
(MultinomialResults): __doc__ = _discrete_results_docs % {"one_line_description" : "A results class for multinomial data fit by l1 regularization", "extra_attr" : _l1_results_attr} def __init__(self, model, mlefit): super(L1MultinomialResults, self).__init__(model, mlefit) # self.trimmed is a boolean array with T/F telling whether or not that # entry in params has been set zero'd out. self.trimmed = mlefit.mle_retvals['trimmed'] self.nnz_params = (self.trimmed == False).sum() #Note: J-1 constants self.model.df_model = self.nnz_params - (self.model.J - 1) self.model.df_resid = float(self.model.endog.shape[0] - self.nnz_params) self.df_model = self.model.df_model self.df_resid = self.model.df_resid #### Results Wrappers #### class OrderedResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(OrderedResultsWrapper, OrderedResults) class CountResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(CountResultsWrapper, CountResults) class NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(NegativeBinomialResultsWrapper, NegativeBinomialResults) class PoissonResultsWrapper(lm.RegressionResultsWrapper): pass #_methods = { # "predict_prob" : "rows", # } #_wrap_methods = lm.wrap.union_dicts( # lm.RegressionResultsWrapper._wrap_methods, # _methods) wrap.populate_wrapper(PoissonResultsWrapper, PoissonResults) class L1CountResultsWrapper(lm.RegressionResultsWrapper): pass class L1PoissonResultsWrapper(lm.RegressionResultsWrapper): pass #_methods = { # "predict_prob" : "rows", # } #_wrap_methods = lm.wrap.union_dicts( # lm.RegressionResultsWrapper._wrap_methods, # _methods) wrap.populate_wrapper(L1PoissonResultsWrapper, L1PoissonResults) class L1NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(L1NegativeBinomialResultsWrapper, L1NegativeBinomialResults) class BinaryResultsWrapper(lm.RegressionResultsWrapper): _attrs = {"resid_dev" : "rows", "resid_generalized" : "rows", "resid_pearson" : "rows", "resid_response" : "rows" } _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs, _attrs) wrap.populate_wrapper(BinaryResultsWrapper, BinaryResults) class L1BinaryResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(L1BinaryResultsWrapper, L1BinaryResults) class MultinomialResultsWrapper(lm.RegressionResultsWrapper): _attrs = {"resid_misclassified" : "rows"} _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs, _attrs) wrap.populate_wrapper(MultinomialResultsWrapper, MultinomialResults) class L1MultinomialResultsWrapper(lm.RegressionResultsWrapper): pass wrap.populate_wrapper(L1MultinomialResultsWrapper, L1MultinomialResults) if __name__=="__main__": import numpy as np import statsmodels.api as sm # Scratch work for negative binomial models # dvisits was written using an R package, I can provide the dataset # on request until the copyright is cleared up #TODO: request permission to use dvisits data2 = np.genfromtxt('../datasets/dvisits/dvisits.csv', names=True) # note that this has missing values for Accident endog = data2['doctorco'] exog = data2[['sex','age','agesq','income','levyplus','freepoor', 'freerepa','illness','actdays','hscore','chcond1', 'chcond2']].view(float, np.ndarray).reshape(len(data2),-1) exog = sm.add_constant(exog, prepend=True) poisson_mod = Poisson(endog, exog) poisson_res = poisson_mod.fit() # nb2_mod = NegBinTwo(endog, exog) # nb2_res = nb2_mod.fit() # solvers hang (with no error and no maxiter warn...) 
# haven't derived hessian (though it will be block diagonal) to check # newton, note that Lawless (1987) has the derivations # appear to be something wrong with the score? # according to Lawless, traditionally the likelihood is maximized wrt to B # and a gridsearch on a to determin ahat? # or the Breslow approach, which is 2 step iterative. nb2_params = [-2.190,.217,-.216,.609,-.142,.118,-.497,.145,.214,.144, .038,.099,.190,1.077] # alpha is last # taken from Cameron and Trivedi # the below is from Cameron and Trivedi as well # endog2 = np.array(endog>=1, dtype=float) # skipped for now, binary poisson results look off? data = sm.datasets.randhie.load() nbreg = NegativeBinomial mod = nbreg(data.endog, data.exog.view((float,9))) #FROM STATA: params = np.asarray([-.05654133, -.21214282, .0878311, -.02991813, .22903632, .06210226, .06799715, .08407035, .18532336]) bse = [0.0062541, 0.0231818, 0.0036942, 0.0034796, 0.0305176, 0.0012397, 0.0198008, 0.0368707, 0.0766506] lnalpha = .31221786 mod.loglike(np.r_[params,np.exp(lnalpha)]) poiss_res = Poisson(data.endog, data.exog.view((float,9))).fit() func = lambda x: -mod.loglike(x) grad = lambda x: -mod.score(x) from scipy import optimize # res1 = optimize.fmin_l_bfgs_b(func, np.r_[poiss_res.params,.1], # approx_grad=True) res1 = optimize.fmin_bfgs(func, np.r_[poiss_res.params,.1], fprime=grad) from statsmodels.tools.numdiff import approx_hess_cs # np.sqrt(np.diag(-np.linalg.inv(approx_hess_cs(np.r_[params,lnalpha], mod.loglike)))) #NOTE: this is the hessian in terms of alpha _not_ lnalpha hess_arr = mod.hessian(res1)
L1MultinomialResults
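A minimal usage sketch for the discrete models shown above, written against the public statsmodels API (statsmodels.api). The synthetic data, coefficient values, and variable names below are illustrative assumptions only and are not part of the original file.

# Sketch: fit Probit and NegativeBinomial on synthetic data (assumes statsmodels is installed).
import numpy as np
import statsmodels.api as sm

rng = np.random.RandomState(0)
n = 500
X = sm.add_constant(rng.normal(size=(n, 2)))  # constant plus two regressors

# Binary outcome; exercises the Probit score/hessian defined above.
y_bin = (X @ np.array([0.5, 1.0, -1.0]) + rng.normal(size=n) > 0).astype(float)
probit_res = sm.Probit(y_bin, X).fit(disp=0)
print(probit_res.summary())
print(probit_res.get_margeff(at='overall', method='dydx').summary())

# Count outcome; NB2 variance function, last parameter is the estimated alpha.
y_count = rng.poisson(np.exp(X @ np.array([0.1, 0.3, -0.2])))
nb_res = sm.NegativeBinomial(y_count, X, loglike_method='nb2').fit(disp=0)
print(nb_res.params)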
text.py
# Copyright 2008-2015 Nokia Networks # Copyright 2016- Robot Framework Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from itertools import takewhile import inspect import os.path import re from .charwidth import get_char_width from .misc import seq2str2 from .platform import JYTHON, PY_VERSION from .robottypes import is_string, is_unicode from .unic import unic MAX_ERROR_LINES = 40 _MAX_ASSIGN_LENGTH = 200 _MAX_ERROR_LINE_LENGTH = 78 _ERROR_CUT_EXPLN = ' [ Message content over the limit has been removed. ]' _TAGS_RE = re.compile(r'\s*tags:(.*)', re.IGNORECASE) def cut_long_message(msg): if MAX_ERROR_LINES is None: return msg lines = msg.splitlines() lengths = _count_line_lengths(lines) if sum(lengths) <= MAX_ERROR_LINES: return msg start = _prune_excess_lines(lines, lengths) end = _prune_excess_lines(lines, lengths, from_end=True) return '\n'.join(start + [_ERROR_CUT_EXPLN] + end) def _prune_excess_lines(lines, lengths, from_end=False): if from_end: lines.reverse() lengths.reverse() ret = [] total = 0 limit = MAX_ERROR_LINES // 2 for line, length in zip(lines[:limit], lengths[:limit]): if total + length >= limit: ret.append(_cut_long_line(line, total, from_end)) break total += length ret.append(line) if from_end: ret.reverse() return ret def _cut_long_line(line, used, from_end): available_lines = MAX_ERROR_LINES // 2 - used available_chars = available_lines * _MAX_ERROR_LINE_LENGTH - 3 if len(line) > available_chars: if not from_end: line = line[:available_chars] + '...' else: line = '...' + line[-available_chars:] return line def _count_line_lengths(lines): return [ _count_virtual_line_length(line) for line in lines ] def _count_virtual_line_length(line): if not line: return 1 lines, remainder = divmod(len(line), _MAX_ERROR_LINE_LENGTH) return lines if not remainder else lines + 1 def format_assign_message(variable, value, cut_long=True): formatter = {'$': unic, '@': seq2str2, '&': _dict_to_str}[variable[0]] value = formatter(value) if cut_long and len(value) > _MAX_ASSIGN_LENGTH: value = value[:_MAX_ASSIGN_LENGTH] + '...' return '%s = %s' % (variable, value) def _dict_to_str(d): if not d: return '{ }' return '{ %s }' % ' | '.join('%s=%s' % (unic(k), unic(v)) for k, v in d.items()) def get_console_length(text): return sum(get_char_width(char) for char in text) def pad_console_length(text, width): if width < 5: width = 5 diff = get_console_length(text) - width if diff > 0: text = _lose_width(text, diff+3) + '...' return _pad_width(text, width) def
(text, width): more = width - get_console_length(text) return text + ' ' * more def _lose_width(text, diff): lost = 0 while lost < diff: lost += get_console_length(text[-1]) text = text[:-1] return text def split_args_from_name_or_path(name): """Split arguments embedded to name or path like ``Example:arg1:arg2``. The separator can be either colon ``:`` or semicolon ``;``. If both are used, the first one is considered to be the separator. """ if os.path.exists(name): return os.path.abspath(name), [] index = _get_arg_separator_index_from_name_or_path(name) if index == -1: return name, [] args = name[index+1:].split(name[index]) name = name[:index] if os.path.exists(name): name = os.path.abspath(name) return name, args def _get_arg_separator_index_from_name_or_path(name): colon_index = name.find(':') # Handle absolute Windows paths if colon_index == 1 and name[2:3] in ('/', '\\'): colon_index = name.find(':', colon_index+1) semicolon_index = name.find(';') if colon_index == -1: return semicolon_index if semicolon_index == -1: return colon_index return min(colon_index, semicolon_index) def split_tags_from_doc(doc): doc = doc.rstrip() tags = [] if not doc: return doc, tags lines = doc.splitlines() match = _TAGS_RE.match(lines[-1]) if match: doc = '\n'.join(lines[:-1]).rstrip() tags = [tag.strip() for tag in match.group(1).split(',')] return doc, tags def getdoc(item): doc = inspect.getdoc(item) or u'' if is_unicode(doc): return doc try: return doc.decode('UTF-8') except UnicodeDecodeError: return unic(doc) def getshortdoc(doc_or_item, linesep='\n'): if not doc_or_item: return u'' doc = doc_or_item if is_string(doc_or_item) else getdoc(doc_or_item) lines = takewhile(lambda line: line.strip(), doc.splitlines()) return linesep.join(lines) # https://bugs.jython.org/issue2772 if JYTHON and PY_VERSION < (2, 7, 2): trailing_spaces = re.compile('\s+$', re.UNICODE) def rstrip(string): return trailing_spaces.sub('', string) else: def rstrip(string): return string.rstrip()
_pad_width
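A small, illustrative sketch of how the text helpers defined above behave. It assumes the functions are already in scope (for example, imported from this module); the library name, documentation string, and widths are made-up examples.

# Sketch: expected behaviour of the helpers above (assumes they are importable from this module).

# Name-with-arguments splitting: the first ':' or ';' acts as the separator,
# unless the whole string happens to be an existing path.
name, args = split_args_from_name_or_path('ExampleLibrary:arg1:arg2')
assert (name, args) == ('ExampleLibrary', ['arg1', 'arg2'])

# A trailing "Tags:" line is split off from a documentation string.
doc, tags = split_tags_from_doc('Does something useful.\nTags: smoke, regression')
assert doc == 'Does something useful.'
assert tags == ['smoke', 'regression']

# Console padding truncates with '...' and pads to the requested width.
padded = pad_console_length('a rather long status message', 10)
assert get_console_length(padded) == 10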
main.go
package main import ( "context" "fmt" log "github.com/sirupsen/logrus" "github.com/kyma-incubator/hydroform/function-examples/internal/client" xunstruct "github.com/kyma-incubator/hydroform/function-examples/internal/unstructured" "github.com/kyma-incubator/hydroform/function/pkg/operator" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) func main()
{ ctx := context.Background() c := client.MapClient{ Data: unstructured.UnstructuredList{}, } u := xunstruct.NewSample("test", "test-ns") o := operator.NewGenericOperator(&c, u) if err := o.Apply(ctx, operator.ApplyOptions{ Options: operator.Options{ Callbacks: operator.Callbacks{ Pre: []func(interface{}, error) error{ func(i interface{}, e error) error { u, ok := i.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unexpected type") } log.WithFields(u.Object).Info("applying object") return nil }, }, }, }, }); err != nil { log.Fatal(err) } }
fpn.py
import warnings import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from ..registry import NECKS @NECKS.register_module class FPN(nn.Module): def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False, extra_convs_on_inputs=True, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, attention=False, act_cfg=None, upsample_cfg=dict(mode='nearest'), init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform'), cfg=None):
def forward(self, inputs): """Forward function.""" assert len(inputs) >= len(self.in_channels) if len(inputs) > len(self.in_channels): for _ in range(len(inputs) - len(self.in_channels)): del inputs[0] # build laterals laterals = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build top-down path used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): # In some cases, fixing `scale factor` (e.g. 2) is preferred, but # it cannot co-exist with `size` in `F.interpolate`. if 'scale_factor' in self.upsample_cfg: laterals[i - 1] += F.interpolate(laterals[i], **self.upsample_cfg) else: prev_shape = laterals[i - 1].shape[2:] laterals[i - 1] += F.interpolate(laterals[i], size=prev_shape, **self.upsample_cfg) # build outputs # part 1: from original levels outs = [ self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) ] # part 2: add extra levels if self.num_outs > len(outs): # use max pool to get more levels on top of outputs # (e.g., Faster R-CNN, Mask R-CNN) if not self.add_extra_convs: for i in range(self.num_outs - used_backbone_levels): outs.append(F.max_pool2d(outs[-1], 1, stride=2)) # add conv layers on top of original feature maps (RetinaNet) else: if self.add_extra_convs == 'on_input': extra_source = inputs[self.backbone_end_level - 1] elif self.add_extra_convs == 'on_lateral': extra_source = laterals[-1] elif self.add_extra_convs == 'on_output': extra_source = outs[-1] else: raise NotImplementedError outs.append(self.fpn_convs[used_backbone_levels](extra_source)) for i in range(used_backbone_levels + 1, self.num_outs): if self.relu_before_extra_convs: outs.append(self.fpn_convs[i](F.relu(outs[-1]))) else: outs.append(self.fpn_convs[i](outs[-1])) return tuple(outs)
super(FPN, self).__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.attention = attention self.relu_before_extra_convs = relu_before_extra_convs self.no_norm_on_lateral = no_norm_on_lateral self.upsample_cfg = upsample_cfg.copy() if end_level == -1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level < inputs, no extra level is allowed self.backbone_end_level = end_level assert end_level <= len(in_channels) assert num_outs == end_level - start_level self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs assert isinstance(add_extra_convs, (str, bool)) if isinstance(add_extra_convs, str): # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') elif add_extra_convs: # True if extra_convs_on_inputs: # TODO: deprecate `extra_convs_on_inputs` warnings.simplefilter('once') warnings.warn( '"extra_convs_on_inputs" will be deprecated in v2.9.0,' 'Please use "add_extra_convs"', DeprecationWarning) self.add_extra_convs = 'on_input' else: self.add_extra_convs = 'on_output' self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, act_cfg=act_cfg, inplace=False) fpn_conv = ConvModule(out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) # add extra conv layers (e.g., RetinaNet) extra_levels = num_outs - self.backbone_end_level + self.start_level if self.add_extra_convs and extra_levels >= 1: for i in range(extra_levels): if i == 0 and self.add_extra_convs == 'on_input': in_channels = self.in_channels[self.backbone_end_level - 1] else: in_channels = out_channels extra_fpn_conv = ConvModule(in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False) self.fpn_convs.append(extra_fpn_conv)
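A quick, hypothetical forward-pass sketch for the FPN neck defined above. The channel counts and feature-map sizes mimic a ResNet-50-style backbone and are assumptions for illustration; the snippet also presumes the FPN class can be imported from its package (it depends on mmcv's ConvModule and the repo's NECKS registry).

# Sketch: run the FPN above on dummy backbone features (shapes are assumed, not from the original file).
import torch

# Hypothetical backbone outputs at strides 4/8/16/32 for a 256x256 input.
feats = [torch.randn(1, c, s, s)
         for c, s in zip([256, 512, 1024, 2048], [64, 32, 16, 8])]

fpn = FPN(in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5)
outs = fpn(feats)
# With add_extra_convs left at False, the extra 5th level is a stride-2 max-pool of the last output.
print([tuple(o.shape) for o in outs])
# -> [(1, 256, 64, 64), (1, 256, 32, 32), (1, 256, 16, 16), (1, 256, 8, 8), (1, 256, 4, 4)]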
log_logzio.go
// Copyright 2021-2022, the SS project owners. All rights reserved. // Please see the OWNERS and LICENSE files for details. package ss import ( "fmt" "log" logziolib "github.com/logzio/logzio-go" ) func newLogzioIfSet( projectPackage string, module string, config Config, sentry sentry, ) (logDestination, error) { logConfig := config.SS.Log.Logzio if logConfig == nil { return nil, nil } result := logzio{ messageChan: make(chan *LogMsg, 10), syncChan: make(chan struct{}),
var err error result.sender, err = logziolib.New( logConfig.Token, logziolib.SetUrl(fmt.Sprintf("https://%s:8071", logConfig.Host)), // logziolib.SetDebug(os.Stderr), logziolib.SetTempDirectory("/tmp/logzio_tmp")) if err != nil { panic(err) } go result.runWriter() return result, nil } type logzio struct { messageChan chan *LogMsg syncChan chan struct{} sender *logziolib.LogzioSender sentry sentry } func (logzio) GetName() string { return "Logz.io" } func (l logzio) WriteDebug(message *LogMsg) error { l.messageChan <- message return nil } func (l logzio) WriteInfo(message *LogMsg) error { l.messageChan <- message return nil } func (l logzio) WriteWarn(message *LogMsg) error { l.messageChan <- message return nil } func (l logzio) WriteError(message *LogMsg) error { l.messageChan <- message return nil } func (l logzio) WritePanic(message *LogMsg) error { l.messageChan <- message return nil } func (l logzio) Sync() error { l.messageChan <- nil <-l.syncChan return nil } func (l logzio) runWriter() { defer l.sender.Stop() for { message, isOpen := <-l.messageChan if !isOpen { break } if message == nil { if err := l.sender.Sync(); err != nil { log.Printf(`Error: Failed to sync log %q record: %v`, l.GetName(), err) l.sentry.CaptureMessage( NewLogMsg(`failed to sync log %q record`, l.GetName()).AddErr(err)) } l.syncChan <- struct{}{} continue } if err := l.sender.Send(message.ConvertToJSON()); err != nil { log.Printf( `Error: Failed to write log %q record: %v`, l.GetName(), err) l.sentry.CaptureMessage( NewLogMsg(`failed to write log %q record`, l.GetName()).AddErr(err)) } } l.sender.Drain() }
sentry: sentry, }
marian_model.rs
// Copyright 2018-2020 The HuggingFace Inc. team. // Copyright 2020 Marian Team Authors // Copyright 2019-2020 Guillaume Becquin // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::bart::{BartConfig, BartModel, BartModelOutput, LayerState}; use crate::pipelines::common::{ModelType, TokenizerOption}; use crate::pipelines::generation_utils::private_generation_utils::{ PreparedInput, PrivateLanguageGenerator, }; use crate::pipelines::generation_utils::{ Cache, GenerateConfig, LMHeadModel, LMModelOutput, LanguageGenerator, }; use crate::{Config, RustBertError}; use rust_tokenizers::tokenizer::{MarianTokenizer, TruncationStrategy}; use rust_tokenizers::vocab::MarianVocab; use std::borrow::Borrow; use tch::nn::Init; use tch::{nn, Kind, Tensor}; /// # Marian Pretrained model weight files pub struct MarianModelResources; /// # Marian Pretrained model config files pub struct MarianConfigResources; /// # Marian Pretrained model vocab files pub struct MarianVocabResources; /// # Marian Pretrained sentence piece model files pub struct MarianSpmResources; /// # Marian optional prefixes pub struct MarianPrefix; impl MarianModelResources { /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. Modified with conversion to C-array format. pub const ENGLISH2ROMANCE: (&'static str, &'static str) = ( "marian-mt-en-ROMANCE/model", "https://huggingface.co/Helsinki-NLP/opus-mt-en-ROMANCE/resolve/main/rust_model.ot", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. Modified with conversion to C-array format. pub const ROMANCE2ENGLISH: (&'static str, &'static str) = ( "marian-mt-ROMANCE-en/model", "https://huggingface.co/Helsinki-NLP/opus-mt-ROMANCE-en/resolve/main/rust_model.ot", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. Modified with conversion to C-array format. pub const ENGLISH2GERMAN: (&'static str, &'static str) = ( "marian-mt-en-de/model", "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/rust_model.ot", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. Modified with conversion to C-array format. pub const GERMAN2ENGLISH: (&'static str, &'static str) = ( "marian-mt-de-en/model", "https://huggingface.co/Helsinki-NLP/opus-mt-de-en/resolve/main/rust_model.ot", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. Modified with conversion to C-array format. 
pub const ENGLISH2RUSSIAN: (&'static str, &'static str) = ( "marian-mt-en-ru/model", "https://huggingface.co/Helsinki-NLP/opus-mt-en-ru/resolve/main/rust_model.ot", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. Modified with conversion to C-array format. pub const RUSSIAN2ENGLISH: (&'static str, &'static str) = ( "marian-mt-ru-en/model", "https://huggingface.co/Helsinki-NLP/opus-mt-ru-en/resolve/main/rust_model.ot", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. Modified with conversion to C-array format. pub const FRENCH2GERMAN: (&'static str, &'static str) = ( "marian-mt-fr-de/model", "https://huggingface.co/Helsinki-NLP/opus-mt-fr-de/resolve/main/rust_model.ot", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. Modified with conversion to C-array format. pub const GERMAN2FRENCH: (&'static str, &'static str) = ( "marian-mt-de-fr/model", "https://huggingface.co/Helsinki-NLP/opus-mt-de-fr/resolve/main/rust_model.ot", ); } impl MarianConfigResources { /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ENGLISH2ROMANCE: (&'static str, &'static str) = ( "marian-mt-en-ROMANCE/config", "https://huggingface.co/Helsinki-NLP/opus-mt-en-ROMANCE/resolve/main/config.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ROMANCE2ENGLISH: (&'static str, &'static str) = ( "marian-mt-ROMANCE-en/config", "https://huggingface.co/Helsinki-NLP/opus-mt-ROMANCE-en/resolve/main/config.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ENGLISH2GERMAN: (&'static str, &'static str) = ( "marian-mt-en-de/config", "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const GERMAN2ENGLISH: (&'static str, &'static str) = ( "marian-mt-de-en/config", "https://huggingface.co/Helsinki-NLP/opus-mt-de-en/resolve/main/config.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ENGLISH2RUSSIAN: (&'static str, &'static str) = ( "marian-mt-en-ru/config", "https://huggingface.co/Helsinki-NLP/opus-mt-en-ru/resolve/main/config.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. 
pub const RUSSIAN2ENGLISH: (&'static str, &'static str) = ( "marian-mt-ru-en/config", "https://huggingface.co/Helsinki-NLP/opus-mt-ru-en/resolve/main/config.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const FRENCH2GERMAN: (&'static str, &'static str) = ( "marian-mt-fr-de/config", "https://huggingface.co/Helsinki-NLP/opus-mt-fr-de/resolve/main/config.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const GERMAN2FRENCH: (&'static str, &'static str) = ( "marian-mt-de-fr/config", "https://huggingface.co/Helsinki-NLP/opus-mt-de-fr/resolve/main/config.json", ); } impl MarianVocabResources { /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ENGLISH2ROMANCE: (&'static str, &'static str) = ( "marian-mt-en-ROMANCE/vocab", "https://huggingface.co/Helsinki-NLP/opus-mt-en-ROMANCE/resolve/main/vocab.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ROMANCE2ENGLISH: (&'static str, &'static str) = ( "marian-mt-ROMANCE-en/vocab", "https://huggingface.co/Helsinki-NLP/opus-mt-ROMANCE-en/resolve/main/vocab.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ENGLISH2GERMAN: (&'static str, &'static str) = ( "marian-mt-en-de/vocab", "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/vocab.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const GERMAN2ENGLISH: (&'static str, &'static str) = ( "marian-mt-de-en/vocab", "https://huggingface.co/Helsinki-NLP/opus-mt-de-en/resolve/main/vocab.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ENGLISH2RUSSIAN: (&'static str, &'static str) = ( "marian-mt-en-ru/vocab", "https://huggingface.co/Helsinki-NLP/opus-mt-en-ru/resolve/main/vocab.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const RUSSIAN2ENGLISH: (&'static str, &'static str) = ( "marian-mt-ru-en/vocab", "https://huggingface.co/Helsinki-NLP/opus-mt-ru-en/resolve/main/vocab.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. 
pub const FRENCH2GERMAN: (&'static str, &'static str) = ( "marian-mt-fr-de/vocab", "https://huggingface.co/Helsinki-NLP/opus-mt-fr-de/resolve/main/vocab.json", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const GERMAN2FRENCH: (&'static str, &'static str) = ( "marian-mt-de-fr/vocab", "https://huggingface.co/Helsinki-NLP/opus-mt-de-fr/resolve/main/vocab.json", ); } impl MarianSpmResources { /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ENGLISH2ROMANCE: (&'static str, &'static str) = ( "marian-mt-en-ROMANCE/spiece", "https://huggingface.co/Helsinki-NLP/opus-mt-en-ROMANCE/resolve/main/source.spm", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ROMANCE2ENGLISH: (&'static str, &'static str) = ( "marian-mt-ROMANCE-en/spiece", "https://huggingface.co/Helsinki-NLP/opus-mt-ROMANCE-en/resolve/main/source.spm", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ENGLISH2GERMAN: (&'static str, &'static str) = ( "marian-mt-en-de/spiece", "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/source.spm", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const GERMAN2ENGLISH: (&'static str, &'static str) = ( "marian-mt-de-en/spiece", "https://huggingface.co/Helsinki-NLP/opus-mt-de-en/resolve/main/source.spm", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const ENGLISH2RUSSIAN: (&'static str, &'static str) = ( "marian-mt-en-ru/spiece", "https://huggingface.co/Helsinki-NLP/opus-mt-en-ru/resolve/main/source.spm", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const RUSSIAN2ENGLISH: (&'static str, &'static str) = ( "marian-mt-ru-en/spiece", "https://huggingface.co/Helsinki-NLP/opus-mt-ru-en/resolve/main/source.spm", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. pub const FRENCH2GERMAN: (&'static str, &'static str) = ( "marian-mt-fr-de/spiece", "https://huggingface.co/Helsinki-NLP/opus-mt-fr-de/resolve/main/source.spm", ); /// Shared under Creative Commons Attribution 4.0 International License license by the Opus-MT team from Language Technology at the University of Helsinki at https://github.com/Helsinki-NLP/Opus-MT. 
pub const GERMAN2FRENCH: (&'static str, &'static str) = ( "marian-mt-de-fr/spiece", "https://huggingface.co/Helsinki-NLP/opus-mt-de-fr/resolve/main/source.spm", ); } impl MarianPrefix { pub const ENGLISH2FRENCH: Option<&'static str> = Some(">>fr<< "); pub const ENGLISH2CATALAN: Option<&'static str> = Some(">>ca<< "); pub const ENGLISH2SPANISH: Option<&'static str> = Some(">>es<< "); pub const ENGLISH2PORTUGUESE: Option<&'static str> = Some(">>pt<< "); pub const ENGLISH2ITALIAN: Option<&'static str> = Some(">>it<< "); pub const ENGLISH2ROMANIAN: Option<&'static str> = Some(">>ro<< "); pub const ENGLISH2GERMAN: Option<&'static str> = None; pub const ENGLISH2RUSSIAN: Option<&'static str> = None; pub const FRENCH2ENGLISH: Option<&'static str> = None; pub const CATALAN2ENGLISH: Option<&'static str> = None; pub const SPANISH2ENGLISH: Option<&'static str> = None; pub const PORTUGUESE2ENGLISH: Option<&'static str> = None; pub const ITALIAN2ENGLISH: Option<&'static str> = None; pub const ROMANIAN2ENGLISH: Option<&'static str> = None; pub const GERMAN2ENGLISH: Option<&'static str> = None; pub const RUSSIAN2ENGLISH: Option<&'static str> = None; pub const FRENCH2GERMAN: Option<&'static str> = None; pub const GERMAN2FRENCH: Option<&'static str> = None; } /// # Marian Model for conditional generation /// Marian model with a vocabulary decoding head /// It is made of the following blocks: /// - `base_model`: `BartModel` Base BART model /// - `linear`: Linear layer with bias tied to the weights of the token id embeddings pub struct MarianForConditionalGeneration { base_model: BartModel, final_logits_bias: Tensor, } impl MarianForConditionalGeneration { /// Build a new `MarianForConditionalGeneration` /// /// # Arguments /// /// * `p` - Variable store path for the root of the BART model /// * `config` - `BartConfig` object defining the model architecture /// * `generation_mode` - flag indicating if the model should run in generation mode (a decoder start token must then be provided) /// /// # Example /// /// ```no_run /// use rust_bert::bart::{BartConfig, BartForConditionalGeneration}; /// use rust_bert::Config; /// use std::path::Path; /// use tch::{nn, Device}; /// /// let config_path = Path::new("path/to/config.json"); /// let device = Device::Cpu; /// let p = nn::VarStore::new(device); /// let config = BartConfig::from_file(config_path); /// let generation_mode = true; /// let bart: BartForConditionalGeneration = /// BartForConditionalGeneration::new(&p.root() / "bart", &config, generation_mode); /// ``` pub fn new<'p, P>( p: P, config: &BartConfig, generation_mode: bool, ) -> MarianForConditionalGeneration where P: Borrow<nn::Path<'p>>, { let p = p.borrow(); let base_model = BartModel::new(p / "model", config, generation_mode); let final_logits_bias = p.var( "final_logits_bias", &[1, config.vocab_size], Init::Const(0.), ); MarianForConditionalGeneration { base_model, final_logits_bias, } } /// Forward pass through the model /// /// # Arguments /// /// * `input_ids` - Optional input tensor of shape (*batch size*, *source_sequence_length*). Must be provided when not running in generation mode /// * `attention_mask` - Optional attention mask of shape (*batch size*, *source_sequence_length*) for the encoder positions. Positions with a mask with value 0 will be masked. 
/// * `encoder_outputs` - Optional tuple made of a tensor of shape (*batch size*, *source_sequence_length*, *encoder_hidden_dim*) and optional vectors of tensors of length *num_encoder_layers* with shape (*batch size*, *source_sequence_length*, *hidden_size*). /// These correspond to the encoder last hidden state and optional hidden states/attention weights for encoder layers. When provided, the encoder hidden state will not be recalculated. Useful for generation tasks. /// * `decoder_input_ids` - Optional input tensor of shape (*batch size*, *target_sequence_length*). Must be provided when running in generation mode (e.g. initialiazed with a BOS token) /// * `decoder_attention_mask` - Optional attention mask of shape (*batch size*, *target_sequence_length*) for the decoder positions. Positions with a mask with value 0 will be masked. /// * `train` - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference. /// /// # Returns /// /// * `BartModelOutput` containing: /// - `decoder_output` - `Tensor` of shape (*batch size*, *target_sequence_length*, *vocab_size*) representing the logits for each vocabulary item and position /// - `cache` - `(Option<Tensor>, Option<Vec<&LayerState, &LayerState>>)` of length *n_layer* containing the encoder padding mask and past keys and values for both the self attention and the encoder cross attention of each layer of the decoder. /// - `all_decoder_hidden_states` - `Option<Vec<Tensor>>` of length *num_decoder_layers* with shape (*batch size*, *target_sequence_length*, *hidden_size*) /// - `all_decoder_attentions` - `Option<Vec<Tensor>>` of length *num_decoder_layers* with shape (*batch size*, *target_sequence_length*, *hidden_size*) /// /// # Example /// /// ```no_run /// # use tch::{nn, Device, Tensor, no_grad}; /// # use rust_bert::Config; /// # use std::path::Path; /// # use tch::kind::Kind::{Int64, Double}; /// use rust_bert::bart::BartConfig; /// use rust_bert::marian::MarianForConditionalGeneration; /// # let config_path = Path::new("path/to/config.json"); /// # let vocab_path = Path::new("path/to/vocab.txt"); /// # let device = Device::Cpu; /// # let vs = nn::VarStore::new(device); /// # let config = BartConfig::from_file(config_path); /// # let mut marian_model = MarianForConditionalGeneration::new(&vs.root(), &config, false); /// let (batch_size, source_sequence_length, target_sequence_length) = (64, 128, 56); /// let input_tensor = Tensor::rand(&[batch_size, source_sequence_length], (Int64, device)); /// let target_tensor = Tensor::rand(&[batch_size, target_sequence_length], (Int64, device)); /// let encoder_attention_mask = /// Tensor::ones(&[batch_size, source_sequence_length], (Int64, device)); /// let decoder_attention_mask = /// Tensor::ones(&[batch_size, source_sequence_length], (Int64, device)); /// /// let model_output = no_grad(|| { /// marian_model.forward_t( /// Some(&input_tensor), /// Some(&encoder_attention_mask), /// None, /// Some(&target_tensor), /// Some(&decoder_attention_mask), /// None, /// false, /// ) /// }); /// ``` pub fn forward_t( &self, input_ids: Option<&Tensor>, attention_mask: Option<&Tensor>, encoder_outputs: Option<&Tensor>, decoder_input_ids: Option<&Tensor>, decoder_attention_mask: Option<&Tensor>, old_layer_states: Option<Vec<(Option<LayerState>, Option<LayerState>)>>, train: bool, ) -> BartModelOutput { let base_model_output = self.base_model.forward_t( input_ids, attention_mask, decoder_input_ids, encoder_outputs, decoder_attention_mask, old_layer_states, train, ); let 
lm_logits = base_model_output .decoder_output .linear::<Tensor>(&self.base_model.embeddings.ws, None); BartModelOutput { decoder_output: lm_logits, ..base_model_output } } pub fn encode(&self, input_ids: &Tensor, attention_mask: Option<&Tensor>) -> Tensor { self.base_model .encoder .forward_t( input_ids, attention_mask, &self.base_model.embeddings, false, ) .hidden_state } }
/// Forward pass through the model /// /// # Arguments /// /// * `input_ids` - Optional input tensor of shape (*batch size*, *sequence_length*). If None, pre-computed embeddings must be provided (see `input_embeds`) /// * `layer_past` - Unused for BART /// * `attention_mask` - Optional mask of shape (*batch size*, *sequence_length*). Masked position have value 0, non-masked value 1. If None set to 1 /// * `input_embeds` - Unused for BART /// * `token_type_ids` - Unused for BART /// * `position_ids` - Unused for BART /// * `encoder_outputs` - Optional tuple made of a tensor of shape (*batch size*, *source_sequence_length*, *encoder_hidden_dim*) and optional vectors of tensors of length *num_encoder_layers* with shape (*batch size*, *source_sequence_length*, *hidden_size*). /// These correspond to the encoder last hidden state and optional hidden states/attention weights for encoder layers. When provided, the encoder hidden state will not be recalculated. Useful for generation tasks. /// * `decoder_input_ids` - Optional input tensor of shape (*batch size*, *target_sequence_length*). Must be provided when running in generation mode (e.g. initialiazed with a BOS token) /// * `train` - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference. /// /// /// # Returns /// /// * `LMModelOutput` containing: /// - `lm_logits` - `Tensor` of shape (*batch size*, *sequence_length*, *vocab_size*) representing the logits for each vocab item and position /// - `cache` - `BartCache` made of `Option<Vec<(Option<Vec<&LayerState, &LayerState>>)>>` of length *n_layer* containing the encoder past keys and values for /// both the self attention and the encoder cross attention of each layer of the decoder. /// /// # Example /// /// ```no_run /// # use tch::{nn, Device, Tensor, no_grad}; /// # use rust_bert::Config; /// # use std::path::Path; /// # use tch::kind::Kind::{Int64, Double}; /// use rust_bert::bart::BartConfig; /// use rust_bert::marian::MarianForConditionalGeneration; /// # let config_path = Path::new("path/to/config.json"); /// # let vocab_path = Path::new("path/to/vocab.txt"); /// # let device = Device::Cpu; /// # let vs = nn::VarStore::new(device); /// # let config = BartConfig::from_file(config_path); /// # let marian_model = MarianForConditionalGeneration::new(&vs.root(), &config, false); /// let (batch_size, source_sequence_length, target_sequence_length) = (64, 128, 56); /// let input_tensor = Tensor::rand(&[batch_size, source_sequence_length], (Int64, device)); /// let target_tensor = Tensor::rand(&[batch_size, target_sequence_length], (Int64, device)); /// let encoder_attention_mask = /// Tensor::ones(&[batch_size, source_sequence_length], (Int64, device)); /// let decoder_attention_mask = /// Tensor::ones(&[batch_size, source_sequence_length], (Int64, device)); /// /// let model_output = no_grad(|| { /// marian_model.forward_t( /// Some(&input_tensor), /// Some(&encoder_attention_mask), /// None, /// Some(&target_tensor), /// Some(&decoder_attention_mask), /// None, /// false, /// ) /// }); /// ``` fn forward_t( &self, input_ids: &Option<Tensor>, cache: Cache, attention_mask: &Option<Tensor>, _token_type_ids: &Option<Tensor>, _position_ids: &Option<Tensor>, _input_embeds: &Option<Tensor>, encoder_outputs: Option<&Tensor>, decoder_input_ids: &Option<Tensor>, train: bool, ) -> Result<LMModelOutput, RustBertError> { let base_model_output = match cache { Cache::BARTCache(cached_layer_states) => self.base_model.forward_t( input_ids.as_ref(), 
attention_mask.as_ref(), decoder_input_ids.as_ref(), encoder_outputs, None, cached_layer_states, train, ), Cache::None => self.base_model.forward_t( input_ids.as_ref(), attention_mask.as_ref(), decoder_input_ids.as_ref(), encoder_outputs, None, None, train, ), _ => { return Err(RustBertError::ValueError( "Cache not compatible with Marian Model".into(), )); } }; let lm_logits = base_model_output .decoder_output .linear::<Tensor>(&self.base_model.embeddings.ws, None) + &self.final_logits_bias; Ok(LMModelOutput { lm_logits, cache: Cache::BARTCache(base_model_output.cache), }) } } /// # Language generation model based on the Marian architecture for machine translation pub struct MarianGenerator { model: MarianForConditionalGeneration, tokenizer: TokenizerOption, var_store: nn::VarStore, generate_config: GenerateConfig, bos_token_id: Option<i64>, eos_token_ids: Option<Vec<i64>>, pad_token_id: Option<i64>, is_encoder_decoder: bool, vocab_size: i64, decoder_start_id: Option<i64>, } impl MarianGenerator { /// Build a new `marianGenerator` /// /// # Arguments /// /// * `vocab_path` - Path to the model vocabulary, expected to have a structure following the [Transformers library](https://github.com/huggingface/transformers) convention /// * `sentencepiece_model_path` - Path to the sentencepiece model (native protobuf expected) /// * `config_path` - Path to the model configuration, expected to have a structure following the [Transformers library](https://github.com/huggingface/transformers) convention /// * `weights_path` - Path to the model weight files. These need to be converted form the `.bin` to `.ot` format using the utility script provided. /// * `device` - Device to run the model on, e.g. `Device::Cpu` or `Device::Cuda(0)` /// /// # Example /// /// ```no_run /// # use std::path::PathBuf; /// # use tch::Device; /// # fn main() -> anyhow::Result<()> { /// use rust_bert::marian::MarianGenerator; /// use rust_bert::pipelines::generation_utils::GenerateConfig; /// # let mut home: PathBuf = dirs::home_dir().unwrap(); /// # home.push("rustbert"); /// # home.push("marian-mt-en-fr"); /// # let config_path = &home.as_path().join("config.json"); /// # let vocab_path = &home.as_path().join("vocab.json"); /// # let merges_path = &home.as_path().join("spiece.model"); /// # let weights_path = &home.as_path().join("model.ot"); /// let device = Device::cuda_if_available(); /// let generate_config = GenerateConfig { /// max_length: 512, /// do_sample: true, /// num_beams: 6, /// temperature: 1.0, /// num_return_sequences: 1, /// ..Default::default() /// }; /// let marian_generator = MarianGenerator::new(generate_config)?; /// # Ok(()) /// # } /// ``` pub fn new(generate_config: GenerateConfig) -> Result<MarianGenerator, RustBertError> { let config_path = generate_config.config_resource.get_local_path()?; let vocab_path = generate_config.vocab_resource.get_local_path()?; let sentence_piece_path = generate_config.merges_resource.get_local_path()?; let weights_path = generate_config.model_resource.get_local_path()?; let device = generate_config.device; generate_config.validate(); let mut var_store = nn::VarStore::new(device); let tokenizer = TokenizerOption::from_file( ModelType::Marian, vocab_path.to_str().unwrap(), Some(sentence_piece_path.to_str().unwrap()), false, None, None, )?; let config = BartConfig::from_file(config_path); let model = MarianForConditionalGeneration::new(&var_store.root(), &config, true); var_store.load(weights_path)?; let bos_token_id = Some(0); let eos_token_ids = 
Some(tokenizer.convert_tokens_to_ids(&[MarianVocab::eos_value()])); let pad_token_id = Some(tokenizer.convert_tokens_to_ids(&[MarianVocab::pad_value()])[0]); let vocab_size = config.vocab_size; let is_encoder_decoder = true; let decoder_start_id = Some(tokenizer.convert_tokens_to_ids(&[MarianVocab::pad_value()])[0]); Ok(MarianGenerator { model, tokenizer, var_store, generate_config, bos_token_id, eos_token_ids, pad_token_id, is_encoder_decoder, vocab_size, decoder_start_id, }) } fn force_token_id_generation(&self, scores: &mut Tensor, token_ids: &[i64]) { let impossible_tokens: Vec<i64> = (0..self.get_vocab_size() as i64) .filter(|pos| !token_ids.contains(pos)) .collect(); let impossible_tokens = Tensor::of_slice(&impossible_tokens).to_device(scores.device()); let _ = scores.index_fill_(1, &impossible_tokens, f64::NEG_INFINITY); } } impl PrivateLanguageGenerator<MarianForConditionalGeneration, MarianVocab, MarianTokenizer> for MarianGenerator { fn get_model(&self) -> &MarianForConditionalGeneration { &self.model } fn get_tokenizer(&self) -> &TokenizerOption { &self.tokenizer } fn get_var_store(&self) -> &nn::VarStore { &self.var_store } fn get_config(&self) -> &GenerateConfig { &self.generate_config } fn get_bos_id(&self) -> &Option<i64> { &self.bos_token_id } fn get_eos_ids(&self) -> &Option<Vec<i64>> { &self.eos_token_ids } fn get_pad_id(&self) -> &Option<i64> { &self.pad_token_id } fn is_encoder_decoder(&self) -> bool { self.is_encoder_decoder } fn get_vocab_size(&self) -> i64 { self.vocab_size } fn get_decoder_start_id(&self) -> Option<i64> { self.decoder_start_id } fn prepare_scores_for_generation( &self, scores: &mut Tensor, current_length: i64, max_length: i64, ) { let _ = scores.index_fill_( 1, &Tensor::of_slice(&[self.get_pad_id().unwrap()]) .to_kind(Kind::Int64) .to_device(scores.device()), std::f64::NEG_INFINITY, ); if current_length == max_length - 1 { self.force_token_id_generation(scores, self.get_eos_ids().as_ref().unwrap()); } } fn encode(&self, input_ids: &Tensor, attention_mask: Option<&Tensor>) -> Option<Tensor> { Some(self.get_model().encode(input_ids, attention_mask)) } fn prepare_inputs_for_generation<'a>( &self, input_ids: Tensor, encoder_outputs: Option<&'a Tensor>, past: Cache, attention_mask: Tensor, ) -> PreparedInput<'a> { match past { Cache::BARTCache(past) => PreparedInput { prepared_input: None, prepared_attention_mask: Some(attention_mask), prepared_encoder_output: encoder_outputs, prepared_decoder_input: Some(input_ids), prepared_position_ids: None, prepared_past: Cache::BARTCache(past), }, Cache::None => PreparedInput { prepared_input: None, prepared_attention_mask: Some(attention_mask), prepared_encoder_output: encoder_outputs, prepared_decoder_input: Some(input_ids), prepared_position_ids: None, prepared_past: Cache::BARTCache(None), }, _ => panic!("Cache type incompatible with Marian"), } } fn encode_prompt_text<'a, T>( &self, prompt_text: T, max_len: i64, pad_token_id: Option<i64>, ) -> Tensor where T: AsRef<[&'a str]>, { let tokens = self.get_tokenizer().encode_list( prompt_text.as_ref(), max_len as usize, &TruncationStrategy::LongestFirst, 0, ); let token_ids = tokens .into_iter() .map(|tokenized_input| tokenized_input.token_ids) .collect::<Vec<Vec<i64>>>(); let max_len = token_ids.iter().map(|input| input.len()).max().unwrap(); let pad_token = match pad_token_id { Some(value) => value, None => self.get_tokenizer().get_unk_id(), }; let token_ids = token_ids .into_iter() .map(|mut input| { let temp = vec![pad_token; max_len - input.len()]; 
input.extend(temp); input }) .map(|tokens| Tensor::of_slice(&tokens).to(self.get_var_store().device())) .collect::<Vec<Tensor>>(); Tensor::stack(&token_ids, 0) } fn reorder_cache( &self, past: &mut Cache, encoder_outputs: Option<Tensor>, beam_indices: &Tensor, ) -> Option<Tensor> { let encoder_outputs = match encoder_outputs { Some(value) => Some(value.index_select(0, beam_indices)), None => None, }; match past { Cache::BARTCache(old_cache_option) => match old_cache_option { Some(old_cache) => { for (self_layer_state, encoder_layer_state) in old_cache.iter_mut() { if self_layer_state.is_some() { self_layer_state .as_mut() .unwrap() .reorder_cache(beam_indices) }; if encoder_layer_state.is_some() { encoder_layer_state .as_mut() .unwrap() .reorder_cache(beam_indices) }; } } None => {} }, Cache::None => {} _ => { panic!("Invalid cache for BART model"); } }; encoder_outputs } } impl LanguageGenerator<MarianForConditionalGeneration, MarianVocab, MarianTokenizer> for MarianGenerator { }
impl LMHeadModel for MarianForConditionalGeneration {
provider.go
package github import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func Provider() terraform.ResourceProvider { p := &schema.Provider{ Schema: map[string]*schema.Schema{ "token": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("GITHUB_TOKEN", nil), Description: descriptions["token"], }, "owner": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("GITHUB_OWNER", nil), Description: descriptions["owner"], }, "organization": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("GITHUB_ORGANIZATION", nil), Description: descriptions["organization"], Deprecated: "Use owner (or GITHUB_OWNER) instead of organization (or GITHUB_ORGANIZATION)", }, "base_url": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("GITHUB_BASE_URL", "https://api.github.com/"), Description: descriptions["base_url"], }, "insecure": { Type: schema.TypeBool, Optional: true, Default: false, Description: descriptions["insecure"], }, "app_auth": { Type: schema.TypeList, Optional: true, MaxItems: 1, Description: descriptions["app_auth"], Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { Type: schema.TypeString, Required: true, DefaultFunc: schema.EnvDefaultFunc("GITHUB_APP_ID", nil), Description: descriptions["app_auth.id"], }, "installation_id": { Type: schema.TypeString, Required: true, DefaultFunc: schema.EnvDefaultFunc("GITHUB_APP_INSTALLATION_ID", nil), Description: descriptions["app_auth.installation_id"], }, "pem_file": { Type: schema.TypeString, Required: true, Sensitive: true, DefaultFunc: schema.EnvDefaultFunc("GITHUB_APP_PEM_FILE", nil), Description: descriptions["app_auth.pem_file"], }, }, }, ConflictsWith: []string{"token"}, }, }, ResourcesMap: map[string]*schema.Resource{ "github_actions_organization_secret": resourceGithubActionsOrganizationSecret(), "github_actions_secret": resourceGithubActionsSecret(), "github_app_installation_repository": resourceGithubAppInstallationRepository(), "github_branch": resourceGithubBranch(), "github_branch_protection": resourceGithubBranchProtection(), "github_branch_protection_v3": resourceGithubBranchProtectionV3(), "github_issue_label": resourceGithubIssueLabel(), "github_membership": resourceGithubMembership(), "github_organization_block": resourceOrganizationBlock(), "github_organization_project": resourceGithubOrganizationProject(), "github_organization_webhook": resourceGithubOrganizationWebhook(), "github_project_card": resourceGithubProjectCard(), "github_project_column": resourceGithubProjectColumn(), "github_repository_collaborator": resourceGithubRepositoryCollaborator(), "github_repository_deploy_key": resourceGithubRepositoryDeployKey(), "github_repository_file": resourceGithubRepositoryFile(), "github_repository_milestone": resourceGithubRepositoryMilestone(), "github_repository_project": resourceGithubRepositoryProject(), "github_repository_pull_request": resourceGithubRepositoryPullRequest(), "github_repository_webhook": resourceGithubRepositoryWebhook(), "github_repository": resourceGithubRepository(), "github_team_membership": resourceGithubTeamMembership(), "github_team_repository": resourceGithubTeamRepository(), "github_team_sync_group_mapping": resourceGithubTeamSyncGroupMapping(), "github_team": resourceGithubTeam(), "github_user_gpg_key": resourceGithubUserGpgKey(), "github_user_invitation_accepter": resourceGithubUserInvitationAccepter(), 
"github_user_ssh_key": resourceGithubUserSshKey(), "github_branch_default": resourceGithubBranchDefault(), }, DataSourcesMap: map[string]*schema.Resource{ "github_actions_public_key": dataSourceGithubActionsPublicKey(), "github_branch": dataSourceGithubBranch(), "github_collaborators": dataSourceGithubCollaborators(), "github_ip_ranges": dataSourceGithubIpRanges(), "github_membership": dataSourceGithubMembership(), "github_organization": dataSourceGithubOrganization(), "github_organization_team_sync_groups": dataSourceGithubOrganizationTeamSyncGroups(), "github_organization_teams": dataSourceGithubOrganizationTeams(), "github_release": dataSourceGithubRelease(), "github_repositories": dataSourceGithubRepositories(), "github_repository": dataSourceGithubRepository(), "github_repository_milestone": dataSourceGithubRepositoryMilestone(), "github_repository_pull_request": dataSourceGithubRepositoryPullRequest(), "github_repository_pull_requests": dataSourceGithubRepositoryPullRequests(), "github_team": dataSourceGithubTeam(), "github_user": dataSourceGithubUser(), }, } p.ConfigureFunc = providerConfigure(p) return p } var descriptions map[string]string func init() { descriptions = map[string]string{ "token": "The OAuth token used to connect to GitHub. Anonymous mode is enabled if both `token` and " + "`app_auth` are not set.", "base_url": "The GitHub Base API URL", "insecure": "Enable `insecure` mode for testing purposes", "owner": "The GitHub owner name to manage. " + "Use this field instead of `organization` when managing individual accounts.", "organization": "The GitHub organization name to manage. " + "Use this field instead of `owner` when managing organization accounts.", "app_auth": "The GitHub App credentials used to connect to GitHub. Conflicts with " + "`token`. Anonymous mode is enabled if both `token` and `app_auth` are not set.", "app_auth.id": "The GitHub App ID.", "app_auth.installation_id": "The GitHub App installation instance ID.", "app_auth.pem_file": "The GitHub App PEM file contents.", } } func providerConfigure(p *schema.Provider) schema.ConfigureFunc { return func(d *schema.ResourceData) (interface{}, error) { owner := d.Get("owner").(string) baseURL := d.Get("base_url").(string) token := d.Get("token").(string) insecure := d.Get("insecure").(bool) // BEGIN backwards compatibility // OwnerOrOrgEnvDefaultFunc used to be the default value for both // 'owner' and 'organization'. This meant that if 'owner' and // 'GITHUB_OWNER' were set, 'GITHUB_OWNER' would be used as the default // value of 'organization' and therefore override 'owner'. // // This seems undesirable (an environment variable should not override // an explicitly set value in a provider block), but is necessary // for backwards compatibility. We could remove this backwards compatibility // code in a future major release. 
env, _ := OwnerOrOrgEnvDefaultFunc() if env.(string) != "" { owner = env.(string) } // END backwards compatibility org := d.Get("organization").(string) if org != "" { owner = org } if appAuth, ok := d.Get("app_auth").([]interface{}); ok && len(appAuth) > 0 && appAuth[0] != nil { appAuthAttr := appAuth[0].(map[string]interface{}) var appID, appInstallationID, appPemFile string if v, ok := appAuthAttr["id"].(string); ok && v != "" { appID = v } else { return nil, fmt.Errorf("app_auth.id must be set and contain a non-empty value") } if v, ok := appAuthAttr["installation_id"].(string); ok && v != "" { appInstallationID = v } else { return nil, fmt.Errorf("app_auth.installation_id must be set and contain a non-empty value") } if v, ok := appAuthAttr["pem_file"].(string); ok && v != "" { appPemFile = v } else { return nil, fmt.Errorf("app_auth.pem_file must be set and contain a non-empty value") } appToken, err := GenerateOAuthTokenFromApp(baseURL, appID, appInstallationID, appPemFile) if err != nil
token = appToken } config := Config{ Token: token, BaseURL: baseURL, Insecure: insecure, Owner: owner, } meta, err := config.Meta() if err != nil { return nil, err } meta.(*Owner).StopContext = p.StopContext() return meta, nil } }
{ return nil, err }
FinalModelOptionalEnumSimple.go
// Automatically generated by the Fast Binary Encoding compiler, do not modify! // https://github.com/chronoxor/FastBinaryEncoding // Source: FBE // Version: 1.4.0.0 package test import "errors" import "../fbe" import "../proto" // Workaround for Go unused imports issue var _ = errors.New var _ = fbe.Version var _ = proto.Version // Fast Binary Encoding optional EnumSimple final model type FinalModelOptionalEnumSimple struct { // Final model buffer buffer *fbe.Buffer // Final model buffer offset offset int // Base final model value value *FinalModelEnumSimple } // Create a new optional EnumSimple final model func NewFinalModelOptionalEnumSimple(buffer *fbe.Buffer, offset int) *FinalModelOptionalEnumSimple { fbeResult := FinalModelOptionalEnumSimple{buffer: buffer, offset: offset} fbeResult.value = NewFinalModelEnumSimple(buffer, 0) return &fbeResult } // Get the optional final model value func (fm *FinalModelOptionalEnumSimple) Value() *FinalModelEnumSimple { return fm.value } // Get the allocation size func (fm *FinalModelOptionalEnumSimple) FBEAllocationSize(fbeValue *EnumSimple) int { if fbeValue != nil { return 1 + fm.value.FBEAllocationSize(fbeValue) } else { return 1 } } // Get the final offset func (fm *FinalModelOptionalEnumSimple) FBEOffset() int { return fm.offset } // Set the final offset func (fm *FinalModelOptionalEnumSimple) SetFBEOffset(value int) { fm.offset = value } // Shift the current final offset func (fm *FinalModelOptionalEnumSimple) FBEShift(size int) { fm.offset += size } // Unshift the current final offset func (fm *FinalModelOptionalEnumSimple) FBEUnshift(size int) { fm.offset -= size } // Check if the object contains a value func (fm *FinalModelOptionalEnumSimple) HasValue() bool { if (fm.buffer.Offset() + fm.FBEOffset() + 1) > fm.buffer.Size() { return false } fbeHasValue := fbe.ReadUInt8(fm.buffer.Data(), fm.buffer.Offset() + fm.FBEOffset()) return fbeHasValue != 0 } // Check if the optional value is valid func (fm *FinalModelOptionalEnumSimple) Verify() int { if (fm.buffer.Offset() + fm.FBEOffset() + 1) > fm.buffer.Size() { return fbe.MaxInt } fbeHasValue := fbe.ReadUInt8(fm.buffer.Data(), fm.buffer.Offset() + fm.FBEOffset()) if fbeHasValue == 0 { return 1 } fm.buffer.Shift(fm.FBEOffset() + 1) fbeResult := fm.value.Verify() fm.buffer.Unshift(fm.FBEOffset() + 1) return 1 + fbeResult } // Get the optional value func (fm *FinalModelOptionalEnumSimple) Get() (*EnumSimple, int, error) { var fbeValue *EnumSimple = nil if (fm.buffer.Offset() + fm.FBEOffset() + 1) > fm.buffer.Size() { return fbeValue, 0, errors.New("model is broken") } if !fm.HasValue() { return fbeValue, 1, nil } var fbeResult int var err error fbeValue = NewEnumSimple() fm.buffer.Shift(fm.FBEOffset() + 1) fbeResult, err = fm.value.GetValue(fbeValue) fm.buffer.Unshift(fm.FBEOffset() + 1) return fbeValue, 1 + fbeResult, err } // Set the optional value func (fm *FinalModelOptionalEnumSimple) Set(fbeValue *EnumSimple) (int, error) { if (fm.buffer.Offset() + fm.FBEOffset() + 1) > fm.buffer.Size() { return 0, errors.New("model is broken") } fbeHasValue := uint8(0) if fbeValue != nil { fbeHasValue = uint8(1) } fbe.WriteUInt8(fm.buffer.Data(), fm.buffer.Offset() + fm.FBEOffset(), fbeHasValue) if fbeHasValue == 0 { return 1, nil } fm.buffer.Shift(fm.FBEOffset() + 1)
fm.buffer.Unshift(fm.FBEOffset() + 1) return 1 + fbeResult, err }
fbeResult, err := fm.value.Set(fbeValue)
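The reads and writes in the generated model above imply a simple wire layout for optional values: a single uint8 "has value" flag, followed by the serialized payload only when the flag is non-zero (HasValue reads the flag, Set writes it and then shifts past it before writing the value). The following Go sketch illustrates that layout with a plain byte slice; the int32 payload and little-endian byte order are assumptions for illustration only and stand in for whatever FinalModelEnumSimple actually writes.

package main

import (
	"encoding/binary"
	"fmt"
)

// writeOptionalInt32 mimics the layout used by the optional final model above:
// one flag byte (0 = absent, 1 = present) followed by the payload only if present.
// int32/little-endian is an illustrative assumption, not the real FBE enum encoding.
func writeOptionalInt32(buf []byte, value *int32) []byte {
	if value == nil {
		return append(buf, 0)
	}
	buf = append(buf, 1)
	payload := make([]byte, 4)
	binary.LittleEndian.PutUint32(payload, uint32(*value))
	return append(buf, payload...)
}

// readOptionalInt32 mirrors HasValue/Get: check the flag byte, then read the payload.
// It returns the decoded value (nil if absent) and the number of bytes consumed.
func readOptionalInt32(buf []byte) (*int32, int) {
	if len(buf) < 1 || buf[0] == 0 {
		return nil, 1
	}
	v := int32(binary.LittleEndian.Uint32(buf[1:5]))
	return &v, 1 + 4
}

func main() {
	v := int32(42)
	encoded := writeOptionalInt32(nil, &v)
	decoded, size := readOptionalInt32(encoded)
	fmt.Println(*decoded, size) // prints: 42 5
}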
percent-encoding.py
import base64 from wptserve.utils import isomorphic_decode # Use numeric references to let the HTML parser take care of inserting the correct code points # rather than trying to figure out the necessary bytes for each encoding. (The latter can be # especially tricky given that Python does not implement the Encoding Standard.) def numeric_references(input): output = b"" for cp in input: output += b"&#x" + format(ord(cp), "X").encode("ascii") + b";" return output def main(request, response): # Undo the "magic" space with + replacement as otherwise base64 decoding will fail. value = request.GET.first(b"value").replace(b" ", b"+")
output_value = numeric_references(base64.b64decode(value).decode("utf-8")) return ( [(b"Content-Type", b"text/html;charset=" + encoding)], b"""<!doctype html> <a href="https://doesnotmatter.invalid/?%s#%s">test</a> """ % (output_value, output_value))
encoding = request.GET.first(b"encoding")
agent.go
Name() string }
package network // Agent - within this package we are only interested in agent names type Agent interface {
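A minimal compile-time sketch of a type satisfying this interface within the same network package; the remoteAgent name is hypothetical and not part of the original package.

package network

// remoteAgent is a hypothetical Agent implementation used only for illustration.
type remoteAgent struct {
	name string
}

func (a remoteAgent) Name() string {
	return a.name
}

// Compile-time assertion that remoteAgent satisfies Agent.
var _ Agent = remoteAgent{}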
config_test.go
package maintenancemode import ( "context" "testing" "time" api "github.com/SAP/stewardci-core/pkg/apis/steward/v1alpha1" "github.com/SAP/stewardci-core/pkg/k8s/fake" mocks "github.com/SAP/stewardci-core/pkg/k8s/mocks" corev1clientmocks "github.com/SAP/stewardci-core/pkg/k8s/mocks/client-go/corev1" gomock "github.com/golang/mock/gomock" "github.com/pkg/errors" "gotest.tools/assert" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/system" _ "knative.dev/pkg/system/testing" ) func Test_IsMaintenanceMode_getError_(t *testing.T) { t.Parallel() // SETUP ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() cf, configMapIfce := newFactoryWithConfigMapIfce(mockCtrl) expectErrorOnGetConfigMap(configMapIfce, api.MaintenanceModeConfigMapName, errors.New("some error")) // EXERCISE _, resultErr := IsMaintenanceMode(ctx, cf) // VERIFY assert.Error(t, resultErr, `invalid configuration: ConfigMap "steward-maintenance-mode" in namespace "knative-testing": some error`) } func Test_IsMaintenanceMode_get_NotFoundError(t *testing.T) { t.Parallel() // SETUP ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() cf, configMapIfce := newFactoryWithConfigMapIfce(mockCtrl) expectErrorOnGetConfigMap(configMapIfce, api.MaintenanceModeConfigMapName, k8serrors.NewNotFound(api.Resource("pipelineruns"), "")) // EXERCISE result, resultErr := IsMaintenanceMode(ctx, cf) // VERIFY assert.Assert(t, result == false) assert.NilError(t, resultErr) } func Test_IsMaintenanceMode_configMapHasDeletionTimestamp(t *testing.T) { t.Parallel() // SETUP ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() cm := newMaintenanceModeConfigMap(map[string]string{ "maintenanceMode": "true", })
cm.ObjectMeta.DeletionTimestamp = &metav1.Time{Time: time.Now()} cf := fake.NewClientFactory(cm) // EXERCISE result, resultErr := IsMaintenanceMode(ctx, cf) // VERIFY assert.Assert(t, result == false) assert.NilError(t, resultErr) } func Test_loadControllerConfig(t *testing.T) { t.Parallel() for _, tc := range []struct { name string configData map[string]string expected bool }{ { "MaintenanceModeEnabled", map[string]string{ "maintenanceMode": "true", }, true, }, { "MaintenanceModeDisabled", map[string]string{ "maintenanceMode": "false", }, false, }, { "MaintenanceModeMissing", map[string]string{}, false, }, } { t.Run(tc.name, func(t *testing.T) { tc := tc // capture current value before going parallel t.Parallel() // SETUP ctx := context.Background() cf := fake.NewClientFactory( newMaintenanceModeConfigMap(tc.configData), ) // EXERCISE result, resultErr := IsMaintenanceMode(ctx, cf) // VERIFY assert.NilError(t, resultErr) assert.Equal(t, tc.expected, result) }) } } func newMaintenanceModeConfigMap(data map[string]string) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: api.MaintenanceModeConfigMapName, Namespace: system.Namespace(), }, Data: data, } } func newFactoryWithConfigMapIfce(mockCtrl *gomock.Controller) (*mocks.MockClientFactory, *corev1clientmocks.MockConfigMapInterface) { cf := mocks.NewMockClientFactory(mockCtrl) coreV1Ifce := corev1clientmocks.NewMockCoreV1Interface(mockCtrl) cf.EXPECT().CoreV1().Return(coreV1Ifce).AnyTimes() configMapIfce := corev1clientmocks.NewMockConfigMapInterface(mockCtrl) coreV1Ifce.EXPECT().ConfigMaps(gomock.Any()).Return(configMapIfce).AnyTimes() return cf, configMapIfce } func expectErrorOnGetConfigMap(configMapIfce *corev1clientmocks.MockConfigMapInterface, configMapName string, expectedError error) { configMapIfce.EXPECT(). Get(gomock.Any(), configMapName, gomock.Any()). Return(nil, expectedError). Times(1) }
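The IsMaintenanceMode implementation itself is not part of this test file. Based purely on what the tests above assert (a missing ConfigMap or one with a deletion timestamp means "not in maintenance mode", any other Get error is wrapped as an invalid-configuration error, and otherwise the maintenanceMode key is parsed as a boolean that defaults to false), a rough Go sketch of that behaviour could look as follows; the function name, the direct CoreV1Interface parameter, and the exact error wrapping are assumptions for illustration, not the package's real API.

package maintenancemode

import (
	"context"
	"strconv"

	"github.com/pkg/errors"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"knative.dev/pkg/system"
)

// maintenanceModeConfigMapName mirrors the name asserted in the tests above
// (api.MaintenanceModeConfigMapName).
const maintenanceModeConfigMapName = "steward-maintenance-mode"

// isMaintenanceModeSketch is an illustrative re-derivation of the behaviour the tests
// above encode; it is not the package's real implementation.
func isMaintenanceModeSketch(ctx context.Context, client corev1client.CoreV1Interface) (bool, error) {
	namespace := system.Namespace()
	cm, err := client.ConfigMaps(namespace).Get(ctx, maintenanceModeConfigMapName, metav1.GetOptions{})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			// A missing ConfigMap means maintenance mode is off.
			return false, nil
		}
		return false, errors.Wrapf(err, "invalid configuration: ConfigMap %q in namespace %q",
			maintenanceModeConfigMapName, namespace)
	}
	if cm.ObjectMeta.DeletionTimestamp != nil {
		// A ConfigMap that is being deleted is treated as absent.
		return false, nil
	}
	value, ok := cm.Data["maintenanceMode"]
	if !ok {
		// A missing key defaults to "not in maintenance mode".
		return false, nil
	}
	return strconv.ParseBool(value)
}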
test_resourcerelease.py
# -*- coding: utf-8 -*- """ Tool tests meant to be run with pytest. Testing whether issue #596 has been repaired. Note: Platform dependent test. Will only fail on Windows > NT. """ import time from os import remove from os.path import join from moviepy.video.compositing.CompositeVideoClip import clips_array from moviepy.video.io.VideoFileClip import VideoFileClip from moviepy.video.VideoClip import ColorClip from tests.test_helper import TMP_DIR def test_release_of_file_via_close(): # Create a random video file.
red = ColorClip((256, 200), color=(255, 0, 0)) green = ColorClip((256, 200), color=(0, 255, 0)) blue = ColorClip((256, 200), color=(0, 0, 255)) red.fps = green.fps = blue.fps = 10 # Repeat this so we can see no conflicts. for i in range(3): # Get the name of a temporary file we can use. local_video_filename = join( TMP_DIR, "test_release_of_file_via_close_%s.mp4" % int(time.time()) ) clip = clips_array([[red, green, blue]]).with_duration(0.5) clip.write_videofile(local_video_filename) # Open it up with VideoFileClip. video = VideoFileClip(local_video_filename) video.close() clip.close() # Now remove the temporary file. # This would fail on Windows if the file is still locked. # This should succeed without exceptions. remove(local_video_filename) red.close() green.close() blue.close()
test_func_api_classification_multiclass.py
#------------------------------------------------------------------------------- # !!! cross_val_predict uses stratified split #------------------------------------------------------------------------------- # Main concept for testing returned arrays: # 1). create ground truth e.g. with cross_val_predict # 2). run vecstack # 3). compare returned arrays with ground truth # 4). compare arrays from file with ground truth #------------------------------------------------------------------------------- from __future__ import print_function from __future__ import division import unittest from numpy.testing import assert_array_equal # from numpy.testing import assert_allclose from numpy.testing import assert_equal import os import glob import numpy as np import scipy.stats as st from sklearn.model_selection import cross_val_predict from sklearn.model_selection import cross_val_score # from sklearn.model_selection import train_test_split from sklearn.model_selection import StratifiedKFold from sklearn.datasets import make_classification from sklearn.metrics import accuracy_score from sklearn.metrics import log_loss from sklearn.metrics import make_scorer from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from vecstack import stacking n_classes = 3 n_folds = 5 temp_dir = 'tmpdw35lg54ms80eb42' X, y = make_classification(n_samples = 500, n_features = 5, n_informative = 3, n_redundant = 1, n_classes = n_classes, flip_y = 0, random_state = 0) # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0) # Make train/test split by hand to avoid strange errors probably related to testing suit: # https://github.com/scikit-learn/scikit-learn/issues/1684 # https://github.com/scikit-learn/scikit-learn/issues/1704 # Note: Python 2.7, 3.4 - OK, but 3.5, 3.6 - error np.random.seed(0) ind = np.arange(500) np.random.shuffle(ind) ind_train = ind[:400] ind_test = ind[400:] X_train = X[ind_train] X_test = X[ind_test] y_train = y[ind_train] y_test = y[ind_test] # Create 4-dim data np.random.seed(42) X_train_4d = np.random.normal(size=(400, 8, 8, 3)) X_test_4d = np.random.normal(size=(100, 8, 8, 3)) y_train_4d = np.random.randint(n_classes, size=400) # Reshape 4-dim to 2-dim X_train_4d_unrolled = X_train_4d.reshape(X_train_4d.shape[0], -1) X_test_4d_unrolled = X_test_4d.reshape(X_test_4d.shape[0], -1) #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ class LogisticRegressionUnrolled(LogisticRegression): """ For tests related to N-dim input. 
Estimator accepts N-dim array and reshape it to 2-dim array """ def fit(self, X, y): return super(LogisticRegressionUnrolled, self).fit(X.reshape(X.shape[0], -1), y) def predict(self, X): return super(LogisticRegressionUnrolled, self).predict(X.reshape(X.shape[0], -1)) def predict_proba(self, X): return super(LogisticRegressionUnrolled, self).predict_proba(X.reshape(X.shape[0], -1)) #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- class TestFuncClassificationMulticlass(unittest.TestCase): @classmethod def setUpClass(cls): try: os.mkdir(temp_dir) except: print('Unable to create temp dir') @classmethod def tearDownClass(cls): try: os.rmdir(temp_dir) except: print('Unable to remove temp dir') def tearDown(self): # Remove files after each test files = glob.glob(os.path.join(temp_dir, '*.npy')) files.extend(glob.glob(os.path.join(temp_dir, '*.log.txt'))) try: for file in files: os.remove(file) except: print('Unable to remove temp file') #--------------------------------------------------------------------------- # Test returned and saved arrays in each mode (parameter <mode>) # Here we also test parameter <stratified> #--------------------------------------------------------------------------- #--------------------------------------------------------------------------- # Predict labels #--------------------------------------------------------------------------- def test_oof_pred_mode(self): model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1) _ = model.fit(X_train, y_train) S_test_1 = model.predict(X_test).reshape(-1, 1) models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, mode = 'oof_pred', random_state = 0, verbose = 0, stratified = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def
(self): model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1) S_test_1 = None models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, mode = 'oof', random_state = 0, verbose = 0, stratified = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def test_pred_mode(self): model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1 = None _ = model.fit(X_train, y_train) S_test_1 = model.predict(X_test).reshape(-1, 1) models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, mode = 'pred', random_state = 0, verbose = 0, stratified = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def test_oof_pred_bag_mode(self): S_test_temp = np.zeros((X_test.shape[0], n_folds)) # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0) for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)): # Split data and target X_tr = X_train[tr_index] y_tr = y_train[tr_index] X_te = X_train[te_index] y_te = y_train[te_index] model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') _ = model.fit(X_tr, y_tr) S_test_temp[:, fold_counter] = model.predict(X_test) S_test_1 = st.mode(S_test_temp, axis = 1)[0] model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1) models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = 
S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def test_pred_bag_mode(self): S_test_temp = np.zeros((X_test.shape[0], n_folds)) # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0) for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)): # Split data and target X_tr = X_train[tr_index] y_tr = y_train[tr_index] X_te = X_train[te_index] y_te = y_train[te_index] model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') _ = model.fit(X_tr, y_tr) S_test_temp[:, fold_counter] = model.predict(X_test) S_test_1 = st.mode(S_test_temp, axis = 1)[0] S_train_1 = None models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, mode = 'pred_bag', random_state = 0, verbose = 0, stratified = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) #--------------------------------------------------------------------------- # Predict proba #--------------------------------------------------------------------------- def test_oof_pred_mode_proba(self): model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict_proba') _ = model.fit(X_train, y_train) S_test_1 = model.predict_proba(X_test) models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, stratified = True, mode = 'oof_pred', random_state = 0, verbose = 0, needs_proba = True, save_dir=temp_dir) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def test_oof_mode_proba(self): model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict_proba') S_test_1 = None models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, stratified = True, mode = 'oof', random_state = 0, verbose = 0, needs_proba = True, save_dir=temp_dir) # Load OOF from file # Normally if cleaning is 
performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def test_pred_mode_proba(self): model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1 = None _ = model.fit(X_train, y_train) S_test_1 = model.predict_proba(X_test) models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, stratified = True, mode = 'pred', random_state = 0, verbose = 0, needs_proba = True, save_dir=temp_dir) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def test_oof_pred_bag_mode_proba(self): S_test_1 = np.zeros((X_test.shape[0], n_classes)) S_test_temp = np.zeros((X_test.shape[0], n_folds * n_classes)) # Using StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0) for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)): # Split data and target X_tr = X_train[tr_index] y_tr = y_train[tr_index] X_te = X_train[te_index] y_te = y_train[te_index] model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') _ = model.fit(X_tr, y_tr) col_slice_fold = slice(fold_counter * n_classes, fold_counter * n_classes + n_classes) S_test_temp[:, col_slice_fold] = model.predict_proba(X_test) for class_id in range(n_classes): S_test_1[:, class_id] = np.mean(S_test_temp[:, class_id::n_classes], axis = 1) model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict_proba') models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True, needs_proba = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] #@@@@ # Look at proba # print('\nOne model') # print('etalon') # print(S_test_1[:2]) # print('vecstack') # print(S_test_2[:2]) #@@@@ assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def 
test_pred_bag_mode_proba(self): S_test_1 = np.zeros((X_test.shape[0], n_classes)) S_test_temp = np.zeros((X_test.shape[0], n_folds * n_classes)) # Using StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0) for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)): # Split data and target X_tr = X_train[tr_index] y_tr = y_train[tr_index] X_te = X_train[te_index] y_te = y_train[te_index] model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') _ = model.fit(X_tr, y_tr) col_slice_fold = slice(fold_counter * n_classes, fold_counter * n_classes + n_classes) S_test_temp[:, col_slice_fold] = model.predict_proba(X_test) for class_id in range(n_classes): S_test_1[:, class_id] = np.mean(S_test_temp[:, class_id::n_classes], axis = 1) S_train_1 = None models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, mode = 'pred_bag', random_state = 0, verbose = 0, stratified = True, needs_proba = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) #--------------------------------------------------------------------------- # Test <shuffle> and <random_state> parameters #--------------------------------------------------------------------------- def test_oof_pred_bag_mode_shuffle(self): S_test_temp = np.zeros((X_test.shape[0], n_folds)) # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold kf = StratifiedKFold(n_splits = n_folds, shuffle = True, random_state = 0) for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)): # Split data and target X_tr = X_train[tr_index] y_tr = y_train[tr_index] X_te = X_train[te_index] y_te = y_train[te_index] model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') _ = model.fit(X_tr, y_tr) S_test_temp[:, fold_counter] = model.predict(X_test) S_test_1 = st.mode(S_test_temp, axis = 1)[0] model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') # !!! Important. 
Here we pass CV-generator not number of folds <cv = kf> S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = kf, n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1) models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = True, save_dir=temp_dir, mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) #--------------------------------------------------------------------------- # Test <metric> parameter and its default values depending on <regression> parameter # Labels # Important. We use <greater_is_better = True> in <make_scorer> for any error function # because we need raw scores (without minus sign) #--------------------------------------------------------------------------- def test_oof_mode_metric(self): model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') scorer = make_scorer(accuracy_score) scores = cross_val_score(model, X_train, y = y_train, cv = n_folds, scoring = scorer, n_jobs = 1, verbose = 0) mean_str_1 = '%.8f' % np.mean(scores) std_str_1 = '%.8f' % np.std(scores) models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train, S_test = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, save_dir=temp_dir, mode = 'oof', random_state = 0, verbose = 0, stratified = True) # Load mean score and std from file # Normally if cleaning is performed there is only one .log.txt file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.log.txt')))[-1] # take the latest file with open(file_name) as f: for line in f: if 'MEAN' in line: split = line.strip().split() break mean_str_2 = split[1][1:-1] std_str_2 = split[3][1:-1] assert_equal(mean_str_1, mean_str_2) assert_equal(std_str_1, std_str_2) #--------------------------------------------------------------------------- # Test <metric> parameter and its default values depending on <regression> parameter # Proba # Important. 
We use <greater_is_better = True> in <make_scorer> for any error function # because we need raw scores (without minus sign) #--------------------------------------------------------------------------- def test_oof_mode_metric_proba(self): model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') scorer = make_scorer(log_loss, needs_proba = True) scores = cross_val_score(model, X_train, y = y_train, cv = n_folds, scoring = scorer, n_jobs = 1, verbose = 0) mean_str_1 = '%.8f' % np.mean(scores) std_str_1 = '%.8f' % np.std(scores) models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')] S_train, S_test = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, save_dir=temp_dir, mode = 'oof', random_state = 0, verbose = 0, stratified = True, needs_proba = True) # Load mean score and std from file # Normally if cleaning is performed there is only one .log.txt file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.log.txt')))[-1] # take the latest file with open(file_name) as f: for line in f: if 'MEAN' in line: split = line.strip().split() break mean_str_2 = split[1][1:-1] std_str_2 = split[3][1:-1] assert_equal(mean_str_1, mean_str_2) assert_equal(std_str_1, std_str_2) #------------------------------------------------------------------------------- # Test several mdels in one run #------------------------------------------------------------------------------- def test_oof_pred_mode_2_models(self): # Model a model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1_a = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1) _ = model.fit(X_train, y_train) S_test_1_a = model.predict(X_test).reshape(-1, 1) # Model b model = GaussianNB() S_train_1_b = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1) _ = model.fit(X_train, y_train) S_test_1_b = model.predict(X_test).reshape(-1, 1) S_train_1 = np.c_[S_train_1_a, S_train_1_b] S_test_1 = np.c_[S_test_1_a, S_test_1_b] models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr'), GaussianNB()] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, mode = 'oof_pred', random_state = 0, verbose = 0, stratified = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def test_oof_pred_bag_mode_2_models(self): # Model a S_test_temp = np.zeros((X_test.shape[0], n_folds)) # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0) for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)): # Split data and target X_tr = X_train[tr_index] y_tr = y_train[tr_index] X_te = X_train[te_index] y_te = y_train[te_index] model = 
LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') _ = model.fit(X_tr, y_tr) S_test_temp[:, fold_counter] = model.predict(X_test) S_test_1_a = st.mode(S_test_temp, axis = 1)[0] model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1_a = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1) # Model b S_test_temp = np.zeros((X_test.shape[0], n_folds)) # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0) for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)): # Split data and target X_tr = X_train[tr_index] y_tr = y_train[tr_index] X_te = X_train[te_index] y_te = y_train[te_index] model = GaussianNB() _ = model.fit(X_tr, y_tr) S_test_temp[:, fold_counter] = model.predict(X_test) S_test_1_b = st.mode(S_test_temp, axis = 1)[0] model = GaussianNB() S_train_1_b = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1) S_train_1 = np.c_[S_train_1_a, S_train_1_b] S_test_1 = np.c_[S_test_1_a, S_test_1_b] models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr'), GaussianNB()] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def test_oof_pred_mode_proba_2_models(self): # Model a model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1_a = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict_proba') _ = model.fit(X_train, y_train) S_test_1_a = model.predict_proba(X_test) # Model b model = GaussianNB() S_train_1_b = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict_proba') _ = model.fit(X_train, y_train) S_test_1_b = model.predict_proba(X_test) S_train_1 = np.c_[S_train_1_a, S_train_1_b] S_test_1 = np.c_[S_test_1_a, S_test_1_b] models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr'), GaussianNB()] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, stratified = True, mode = 'oof_pred', random_state = 0, verbose = 0, needs_proba = True, save_dir=temp_dir) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def 
test_oof_pred_bag_mode_proba_2_models(self): # Model a S_test_1_a = np.zeros((X_test.shape[0], n_classes)) S_test_temp = np.zeros((X_test.shape[0], n_folds * n_classes)) # Using StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0) for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)): # Split data and target X_tr = X_train[tr_index] y_tr = y_train[tr_index] X_te = X_train[te_index] y_te = y_train[te_index] model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') _ = model.fit(X_tr, y_tr) col_slice_fold = slice(fold_counter * n_classes, fold_counter * n_classes + n_classes) S_test_temp[:, col_slice_fold] = model.predict_proba(X_test) for class_id in range(n_classes): S_test_1_a[:, class_id] = np.mean(S_test_temp[:, class_id::n_classes], axis = 1) model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1_a = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict_proba') # Model b S_test_1_b = np.zeros((X_test.shape[0], n_classes)) S_test_temp = np.zeros((X_test.shape[0], n_folds * n_classes)) # Using StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0) for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)): # Split data and target X_tr = X_train[tr_index] y_tr = y_train[tr_index] X_te = X_train[te_index] y_te = y_train[te_index] model = GaussianNB() _ = model.fit(X_tr, y_tr) col_slice_fold = slice(fold_counter * n_classes, fold_counter * n_classes + n_classes) S_test_temp[:, col_slice_fold] = model.predict_proba(X_test) for class_id in range(n_classes): S_test_1_b[:, class_id] = np.mean(S_test_temp[:, class_id::n_classes], axis = 1) model = GaussianNB() S_train_1_b = cross_val_predict(model, X_train, y = y_train, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict_proba') S_train_1 = np.c_[S_train_1_a, S_train_1_b] S_test_1 = np.c_[S_test_1_a, S_test_1_b] models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr'), GaussianNB()] S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True, needs_proba = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] #@@@@ # Look at proba # print('\nTwo models') # print('etalon') # print(S_test_1[:2]) # print('vecstack') # print(S_test_2[:2]) #@@@@ assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) def test_N_dim_input(self): """ This is `test_oof_pred_bag_mode` function with `LogisticRegressionUnrolled` estimator """ S_test_temp = np.zeros((X_test_4d_unrolled.shape[0], n_folds)) # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0) for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train_4d_unrolled, 
y_train_4d)): # Split data and target X_tr = X_train_4d_unrolled[tr_index] y_tr = y_train_4d[tr_index] X_te = X_train_4d_unrolled[te_index] y_te = y_train_4d[te_index] model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') _ = model.fit(X_tr, y_tr) S_test_temp[:, fold_counter] = model.predict(X_test_4d_unrolled) S_test_1 = st.mode(S_test_temp, axis = 1)[0] model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr') S_train_1 = cross_val_predict(model, X_train_4d_unrolled, y = y_train_4d, cv = n_folds, n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1) models = [LogisticRegressionUnrolled(random_state=0, solver='liblinear', multi_class='ovr')] S_train_2, S_test_2 = stacking(models, X_train_4d, y_train_4d, X_test_4d, regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True) # Load OOF from file # Normally if cleaning is performed there is only one .npy file at given moment # But if we have no cleaning there may be more then one file so we take the latest file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file S = np.load(file_name, allow_pickle=True) S_train_3 = S[0] S_test_3 = S[1] assert_array_equal(S_train_1, S_train_2) assert_array_equal(S_test_1, S_test_2) assert_array_equal(S_train_1, S_train_3) assert_array_equal(S_test_1, S_test_3) #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- if __name__ == '__main__': unittest.main() #------------------------------------------------------------------------------- #-------------------------------------------------------------------------------
test_oof_mode
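A minimal standalone sketch of the comparison pattern the record above exercises: vecstack's stacking() output in 'oof_pred' mode is checked against a reference built from cross_val_predict plus a fit/predict on the full training set. The iris data, fold count and estimator settings below are illustrative assumptions, not taken from the record's hidden setup.

# Sketch only -- dataset, n_folds and estimator settings are assumed for illustration.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_predict
from vecstack import stacking

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
n_folds = 5

# Reference: out-of-fold predictions on the training set ...
model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')
S_train_ref = cross_val_predict(model, X_train, y=y_train, cv=n_folds,
                                method='predict').reshape(-1, 1)
# ... and test predictions from a model refit on the full training set.
model.fit(X_train, y_train)
S_test_ref = model.predict(X_test).reshape(-1, 1)

# vecstack should reproduce both matrices in 'oof_pred' mode.
models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]
S_train, S_test = stacking(models, X_train, y_train, X_test,
                           regression=False, n_folds=n_folds, shuffle=False,
                           stratified=True, mode='oof_pred',
                           random_state=0, verbose=0)

np.testing.assert_array_equal(S_train_ref, S_train)
np.testing.assert_array_equal(S_test_ref, S_test)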
Video_Generation_with_Detections.py
import cv2
import numpy as np

Initial_Frame = 900
Final_Frame = 1190

video_name = 'Sample2 with detections.avi'

frame = cv2.imread("image/Seg_frame%d.jpg" % Initial_Frame)
height, width, layers = frame.shape
for x in range(Initial_Frame, Final_Frame + 1, 1):
    frame = cv2.imread("image/Seg_frame%d.jpg" % x)
    video.write(frame)
    print(round(((x - Initial_Frame) / (Final_Frame - Initial_Frame)) * 100, 2), '%')

cv2.destroyAllWindows()
video.release()
fps = 15
video = cv2.VideoWriter(video_name, 0, fps, (width, height))
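For reference, a hedged sketch of how the prefix, middle and suffix of this record fit back together as one script; the explicit MJPG fourcc (the record passes 0, i.e. an uncompressed AVI stream) and the omitted progress print are deliberate simplifications.

# Sketch of the assembled script; the MJPG fourcc is an assumption.
import cv2

Initial_Frame = 900
Final_Frame = 1190
video_name = 'Sample2 with detections.avi'

first = cv2.imread("image/Seg_frame%d.jpg" % Initial_Frame)
height, width, layers = first.shape

fps = 15
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
video = cv2.VideoWriter(video_name, fourcc, fps, (width, height))

for x in range(Initial_Frame, Final_Frame + 1):
    video.write(cv2.imread("image/Seg_frame%d.jpg" % x))

video.release()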
config_parser.py
import json


class Struct(object):
    def __init__(self, data):
        for name, value in data.items():
            setattr(self, name, self._wrap(value))

    def _wrap(self, value):
        if isinstance(value, (tuple, list, set, frozenset)):
            return type(value)([self._wrap(v) for v in value])
        else:
            return Struct(value) if isinstance(value, dict) else value
with open(config_path) as config:
    cfg = json.load(config, object_hook=Struct)
config_path = "/home/pi/qrcode_detect/cfg.json"
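A short usage sketch for the record above, assuming the Struct class from its prefix is in scope; the sample JSON contents and attribute names are invented for illustration.

# Assumes the Struct class defined in the record above is in scope.
import json

raw = '{"camera": {"width": 640, "height": 480}, "labels": ["qr", "barcode"]}'
cfg = json.loads(raw, object_hook=Struct)

print(cfg.camera.width)   # 640 -- nested dicts become attribute access
print(cfg.labels[0])      # 'qr' -- sequences keep their type, elements are wrapped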
models.rs
#![doc = "generated by AutoRust 0.1.0"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificateVerificationDescription { #[serde(default, skip_serializing_if = "Option::is_none")] pub certificate: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificateListDescription { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<CertificateDescription>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificateBodyDescription { #[serde(default, skip_serializing_if = "Option::is_none")] pub certificate: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificateDescription { #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<CertificateProperties>, #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub etag: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificateWithNonceDescription { #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<CertificatePropertiesWithNonce>, #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub etag: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SharedAccessSignatureAuthorizationRule { #[serde(rename = "keyName")] pub key_name: String, #[serde(rename = "primaryKey", default, skip_serializing_if = "Option::is_none")] pub primary_key: Option<String>, #[serde(rename = "secondaryKey", default, skip_serializing_if = "Option::is_none")] pub secondary_key: Option<String>, pub rights: shared_access_signature_authorization_rule::Rights, } pub mod shared_access_signature_authorization_rule { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Rights { RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect, #[serde(rename = "RegistryRead, RegistryWrite")] RegistryReadRegistryWrite, #[serde(rename = "RegistryRead, ServiceConnect")] RegistryReadServiceConnect, #[serde(rename = "RegistryRead, DeviceConnect")] RegistryReadDeviceConnect, #[serde(rename = "RegistryWrite, ServiceConnect")] RegistryWriteServiceConnect, #[serde(rename = "RegistryWrite, DeviceConnect")] RegistryWriteDeviceConnect, #[serde(rename = "ServiceConnect, DeviceConnect")] ServiceConnectDeviceConnect, #[serde(rename = "RegistryRead, RegistryWrite, ServiceConnect")] RegistryReadRegistryWriteServiceConnect, #[serde(rename = "RegistryRead, RegistryWrite, DeviceConnect")] RegistryReadRegistryWriteDeviceConnect, #[serde(rename = "RegistryRead, ServiceConnect, DeviceConnect")] RegistryReadServiceConnectDeviceConnect, #[serde(rename = "RegistryWrite, ServiceConnect, DeviceConnect")] RegistryWriteServiceConnectDeviceConnect, #[serde(rename = "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect")] RegistryReadRegistryWriteServiceConnectDeviceConnect, 
} } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificateProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub subject: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub expiry: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub thumbprint: Option<String>, #[serde(rename = "isVerified", default, skip_serializing_if = "Option::is_none")] pub is_verified: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub created: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub updated: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub certificate: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificatePropertiesWithNonce { #[serde(default, skip_serializing_if = "Option::is_none")] pub subject: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub expiry: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub thumbprint: Option<String>, #[serde(rename = "isVerified", default, skip_serializing_if = "Option::is_none")] pub is_verified: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub created: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub updated: Option<String>, #[serde(rename = "verificationCode", default, skip_serializing_if = "Option::is_none")] pub verification_code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub certificate: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IotHubProperties { #[serde(rename = "authorizationPolicies", default, skip_serializing_if = "Vec::is_empty")] pub authorization_policies: Vec<SharedAccessSignatureAuthorizationRule>, #[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")] pub public_network_access: Option<iot_hub_properties::PublicNetworkAccess>, #[serde(rename = "ipFilterRules", default, skip_serializing_if = "Vec::is_empty")] pub ip_filter_rules: Vec<IpFilterRule>, #[serde(rename = "networkRuleSets", default, skip_serializing_if = "Option::is_none")] pub network_rule_sets: Option<NetworkRuleSetProperties>, #[serde(rename = "minTlsVersion", default, skip_serializing_if = "Option::is_none")] pub min_tls_version: Option<String>, #[serde(rename = "privateEndpointConnections", default, skip_serializing_if = "Vec::is_empty")] pub private_endpoint_connections: Vec<PrivateEndpointConnection>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub state: Option<String>, #[serde(rename = "hostName", default, skip_serializing_if = "Option::is_none")] pub host_name: Option<String>, #[serde(rename = "eventHubEndpoints", default, skip_serializing_if = "Option::is_none")] pub event_hub_endpoints: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub routing: Option<RoutingProperties>, #[serde(rename = "storageEndpoints", default, skip_serializing_if = "Option::is_none")] pub storage_endpoints: Option<serde_json::Value>, #[serde(rename = "messagingEndpoints", default, skip_serializing_if = "Option::is_none")] pub messaging_endpoints: Option<serde_json::Value>, #[serde(rename = "enableFileUploadNotifications", default, skip_serializing_if = "Option::is_none")] pub 
enable_file_upload_notifications: Option<bool>, #[serde(rename = "cloudToDevice", default, skip_serializing_if = "Option::is_none")] pub cloud_to_device: Option<CloudToDeviceProperties>, #[serde(default, skip_serializing_if = "Option::is_none")] pub comments: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub features: Option<iot_hub_properties::Features>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub locations: Vec<IotHubLocationDescription>, } pub mod iot_hub_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum PublicNetworkAccess { Enabled, Disabled, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Features { None, DeviceManagement, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IotHubSkuInfo { pub name: iot_hub_sku_info::Name, #[serde(default, skip_serializing_if = "Option::is_none")] pub tier: Option<iot_hub_sku_info::Tier>, #[serde(default, skip_serializing_if = "Option::is_none")] pub capacity: Option<i64>, } pub mod iot_hub_sku_info { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Name { F1, S1, S2, S3, B1, B2, B3, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Tier { Free, Standard, Basic, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubProperties { #[serde(rename = "retentionTimeInDays", default, skip_serializing_if = "Option::is_none")] pub retention_time_in_days: Option<i64>, #[serde(rename = "partitionCount", default, skip_serializing_if = "Option::is_none")] pub partition_count: Option<i32>, #[serde(rename = "partitionIds", default, skip_serializing_if = "Vec::is_empty")] pub partition_ids: Vec<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub endpoint: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct StorageEndpointProperties { #[serde(rename = "sasTtlAsIso8601", default, skip_serializing_if = "Option::is_none")] pub sas_ttl_as_iso8601: Option<String>, #[serde(rename = "connectionString")] pub connection_string: String, #[serde(rename = "containerName")] pub container_name: String, #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")] pub authentication_type: Option<storage_endpoint_properties::AuthenticationType>, } pub mod storage_endpoint_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum AuthenticationType { #[serde(rename = "keyBased")] KeyBased, #[serde(rename = "identityBased")] IdentityBased, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MessagingEndpointProperties { #[serde(rename = "lockDurationAsIso8601", default, skip_serializing_if = "Option::is_none")] pub lock_duration_as_iso8601: Option<String>, #[serde(rename = "ttlAsIso8601", default, skip_serializing_if = "Option::is_none")] pub ttl_as_iso8601: Option<String>, #[serde(rename = "maxDeliveryCount", default, skip_serializing_if = "Option::is_none")] pub max_delivery_count: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CloudToDeviceProperties { #[serde(rename = "maxDeliveryCount", default, skip_serializing_if = "Option::is_none")] pub max_delivery_count: Option<i32>, #[serde(rename = "defaultTtlAsIso8601", default, skip_serializing_if = "Option::is_none")] pub default_ttl_as_iso8601: Option<String>, 
#[serde(default, skip_serializing_if = "Option::is_none")] pub feedback: Option<FeedbackProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IpFilterRule { #[serde(rename = "filterName")] pub filter_name: String, pub action: ip_filter_rule::Action, #[serde(rename = "ipMask")] pub ip_mask: String, } pub mod ip_filter_rule { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Action { Accept, Reject, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NetworkRuleSetProperties { #[serde(rename = "defaultAction", default, skip_serializing_if = "Option::is_none")] pub default_action: Option<network_rule_set_properties::DefaultAction>, #[serde(rename = "applyToBuiltInEventHubEndpoint")] pub apply_to_built_in_event_hub_endpoint: bool, #[serde(rename = "ipRules")] pub ip_rules: Vec<NetworkRuleSetIpRule>, } pub mod network_rule_set_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum DefaultAction { Deny, Allow, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NetworkRuleSetIpRule { #[serde(rename = "filterName")] pub filter_name: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub action: Option<network_rule_set_ip_rule::Action>, #[serde(rename = "ipMask")] pub ip_mask: String, } pub mod network_rule_set_ip_rule { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Action { Allow, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkResources { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<GroupIdInformation>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GroupIdInformation { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, pub properties: GroupIdInformationProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GroupIdInformationProperties { #[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")] pub group_id: Option<String>, #[serde(rename = "requiredMembers", default, skip_serializing_if = "Vec::is_empty")] pub required_members: Vec<String>, #[serde(rename = "requiredZoneNames", default, skip_serializing_if = "Vec::is_empty")] pub required_zone_names: Vec<String>, } pub type PrivateEndpointConnectionsList = Vec<PrivateEndpointConnection>; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpointConnection { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, pub properties: PrivateEndpointConnectionProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpointConnectionProperties { #[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")] pub private_endpoint: Option<PrivateEndpoint>, #[serde(rename = "privateLinkServiceConnectionState")] pub private_link_service_connection_state: PrivateLinkServiceConnectionState, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpoint { #[serde(default, skip_serializing_if = 
"Option::is_none")] pub id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkServiceConnectionState { pub status: private_link_service_connection_state::Status, pub description: String, #[serde(rename = "actionsRequired", default, skip_serializing_if = "Option::is_none")] pub actions_required: Option<String>, } pub mod private_link_service_connection_state { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { Pending, Approved, Rejected, Disconnected, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct FeedbackProperties { #[serde(rename = "lockDurationAsIso8601", default, skip_serializing_if = "Option::is_none")] pub lock_duration_as_iso8601: Option<String>, #[serde(rename = "ttlAsIso8601", default, skip_serializing_if = "Option::is_none")] pub ttl_as_iso8601: Option<String>, #[serde(rename = "maxDeliveryCount", default, skip_serializing_if = "Option::is_none")] pub max_delivery_count: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RoutingProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub endpoints: Option<RoutingEndpoints>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub routes: Vec<RouteProperties>, #[serde(rename = "fallbackRoute", default, skip_serializing_if = "Option::is_none")] pub fallback_route: Option<FallbackRouteProperties>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub enrichments: Vec<EnrichmentProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RoutingEndpoints { #[serde(rename = "serviceBusQueues", default, skip_serializing_if = "Vec::is_empty")] pub service_bus_queues: Vec<RoutingServiceBusQueueEndpointProperties>, #[serde(rename = "serviceBusTopics", default, skip_serializing_if = "Vec::is_empty")] pub service_bus_topics: Vec<RoutingServiceBusTopicEndpointProperties>, #[serde(rename = "eventHubs", default, skip_serializing_if = "Vec::is_empty")] pub event_hubs: Vec<RoutingEventHubProperties>, #[serde(rename = "storageContainers", default, skip_serializing_if = "Vec::is_empty")] pub storage_containers: Vec<RoutingStorageContainerProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RoutingServiceBusQueueEndpointProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")] pub connection_string: Option<String>, #[serde(rename = "endpointUri", default, skip_serializing_if = "Option::is_none")] pub endpoint_uri: Option<String>, #[serde(rename = "entityPath", default, skip_serializing_if = "Option::is_none")] pub entity_path: Option<String>, #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")] pub authentication_type: Option<routing_service_bus_queue_endpoint_properties::AuthenticationType>, pub name: String, #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")] pub subscription_id: Option<String>, #[serde(rename = "resourceGroup", default, skip_serializing_if = "Option::is_none")] pub resource_group: Option<String>, } pub mod routing_service_bus_queue_endpoint_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum AuthenticationType { #[serde(rename = "keyBased")] KeyBased, #[serde(rename = "identityBased")] IdentityBased, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 
pub struct RoutingServiceBusTopicEndpointProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")] pub connection_string: Option<String>, #[serde(rename = "endpointUri", default, skip_serializing_if = "Option::is_none")] pub endpoint_uri: Option<String>, #[serde(rename = "entityPath", default, skip_serializing_if = "Option::is_none")] pub entity_path: Option<String>, #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")] pub authentication_type: Option<routing_service_bus_topic_endpoint_properties::AuthenticationType>, pub name: String, #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")] pub subscription_id: Option<String>, #[serde(rename = "resourceGroup", default, skip_serializing_if = "Option::is_none")] pub resource_group: Option<String>, } pub mod routing_service_bus_topic_endpoint_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum AuthenticationType { #[serde(rename = "keyBased")] KeyBased, #[serde(rename = "identityBased")] IdentityBased, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RoutingEventHubProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")] pub connection_string: Option<String>, #[serde(rename = "endpointUri", default, skip_serializing_if = "Option::is_none")] pub endpoint_uri: Option<String>, #[serde(rename = "entityPath", default, skip_serializing_if = "Option::is_none")] pub entity_path: Option<String>, #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")] pub authentication_type: Option<routing_event_hub_properties::AuthenticationType>, pub name: String, #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")] pub subscription_id: Option<String>, #[serde(rename = "resourceGroup", default, skip_serializing_if = "Option::is_none")] pub resource_group: Option<String>, } pub mod routing_event_hub_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum AuthenticationType { #[serde(rename = "keyBased")] KeyBased, #[serde(rename = "identityBased")] IdentityBased, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RoutingStorageContainerProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")] pub connection_string: Option<String>, #[serde(rename = "endpointUri", default, skip_serializing_if = "Option::is_none")] pub endpoint_uri: Option<String>, #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")] pub authentication_type: Option<routing_storage_container_properties::AuthenticationType>, pub name: String, #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")] pub subscription_id: Option<String>, #[serde(rename = "resourceGroup", default, skip_serializing_if = "Option::is_none")] pub resource_group: Option<String>, #[serde(rename = "containerName")] pub container_name: String, #[serde(rename = "fileNameFormat", default, skip_serializing_if = "Option::is_none")] pub file_name_format: Option<String>, #[serde(rename = "batchFrequencyInSeconds", default, skip_serializing_if = 
"Option::is_none")] pub batch_frequency_in_seconds: Option<i32>, #[serde(rename = "maxChunkSizeInBytes", default, skip_serializing_if = "Option::is_none")] pub max_chunk_size_in_bytes: Option<i32>, #[serde(default, skip_serializing_if = "Option::is_none")] pub encoding: Option<routing_storage_container_properties::Encoding>, } pub mod routing_storage_container_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum AuthenticationType { #[serde(rename = "keyBased")] KeyBased, #[serde(rename = "identityBased")] IdentityBased, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Encoding { Avro, AvroDeflate, #[serde(rename = "JSON")] Json, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RouteProperties { pub name: String, pub source: route_properties::Source, #[serde(default, skip_serializing_if = "Option::is_none")] pub condition: Option<String>, #[serde(rename = "endpointNames")] pub endpoint_names: Vec<String>, #[serde(rename = "isEnabled")] pub is_enabled: bool, } pub mod route_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Source { Invalid, DeviceMessages, TwinChangeEvents, DeviceLifecycleEvents, DeviceJobLifecycleEvents, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct FallbackRouteProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, pub source: fallback_route_properties::Source, #[serde(default, skip_serializing_if = "Option::is_none")] pub condition: Option<String>, #[serde(rename = "endpointNames")] pub endpoint_names: Vec<String>, #[serde(rename = "isEnabled")] pub is_enabled: bool, } pub mod fallback_route_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Source { DeviceMessages, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnrichmentProperties { pub key: String, pub value: String, #[serde(rename = "endpointNames")] pub endpoint_names: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IotHubDescription { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub etag: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<IotHubProperties>, pub sku: IotHubSkuInfo, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Resource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, pub location: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SharedAccessSignatureAuthorizationRuleListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<SharedAccessSignatureAuthorizationRule>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Operation>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, 
Serialize, Deserialize)] pub struct Operation { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub display: Option<operation::Display>, } pub mod operation { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Display { #[serde(default, skip_serializing_if = "Option::is_none")] pub provider: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub resource: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub operation: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ErrorDetails { #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(rename = "httpStatusCode", default, skip_serializing_if = "Option::is_none")] pub http_status_code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub details: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IotHubQuotaMetricInfoListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<IotHubQuotaMetricInfo>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EndpointHealthDataListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<EndpointHealthData>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EndpointHealthData { #[serde(rename = "endpointId", default, skip_serializing_if = "Option::is_none")] pub endpoint_id: Option<String>, #[serde(rename = "healthStatus", default, skip_serializing_if = "Option::is_none")] pub health_status: Option<endpoint_health_data::HealthStatus>, #[serde(rename = "lastKnownError", default, skip_serializing_if = "Option::is_none")] pub last_known_error: Option<String>, #[serde(rename = "lastKnownErrorTime", default, skip_serializing_if = "Option::is_none")] pub last_known_error_time: Option<String>, #[serde(rename = "lastSuccessfulSendAttemptTime", default, skip_serializing_if = "Option::is_none")] pub last_successful_send_attempt_time: Option<String>, #[serde(rename = "lastSendAttemptTime", default, skip_serializing_if = "Option::is_none")] pub last_send_attempt_time: Option<String>, } pub mod endpoint_health_data { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum HealthStatus { #[serde(rename = "unknown")] Unknown, #[serde(rename = "healthy")] Healthy, #[serde(rename = "degraded")] Degraded, #[serde(rename = "unhealthy")] Unhealthy, #[serde(rename = "dead")] Dead, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryStatistics { #[serde(rename = "totalDeviceCount", default, skip_serializing_if = "Option::is_none")] pub total_device_count: Option<i64>, #[serde(rename = "enabledDeviceCount", default, skip_serializing_if = "Option::is_none")] pub enabled_device_count: Option<i64>, #[serde(rename = "disabledDeviceCount", default, skip_serializing_if = "Option::is_none")] pub disabled_device_count: Option<i64>, } #[derive(Clone, Debug, PartialEq, Serialize, 
Deserialize)] pub struct JobResponseListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<JobResponse>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IotHubSkuDescription { #[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")] pub resource_type: Option<String>, pub sku: IotHubSkuInfo, pub capacity: IotHubCapacity, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TagsResource { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IotHubCapacity { #[serde(default, skip_serializing_if = "Option::is_none")] pub minimum: Option<i64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub maximum: Option<i64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub default: Option<i64>, #[serde(rename = "scaleType", default, skip_serializing_if = "Option::is_none")] pub scale_type: Option<iot_hub_capacity::ScaleType>, } pub mod iot_hub_capacity { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ScaleType { Automatic, Manual, None, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubConsumerGroupsListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<EventHubConsumerGroupInfo>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubConsumerGroupBodyDescription { #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<EventHubConsumerGroupName>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubConsumerGroupName { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubConsumerGroupInfo { #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub etag: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IotHubSkuDescriptionListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<IotHubSkuDescription>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobResponse { #[serde(rename = "jobId", default, skip_serializing_if = "Option::is_none")] pub job_id: Option<String>, #[serde(rename = "startTimeUtc", default, skip_serializing_if = "Option::is_none")] pub start_time_utc: Option<String>, #[serde(rename = "endTimeUtc", default, skip_serializing_if = "Option::is_none")] pub end_time_utc: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<job_response::Type>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: 
Option<job_response::Status>, #[serde(rename = "failureReason", default, skip_serializing_if = "Option::is_none")] pub failure_reason: Option<String>, #[serde(rename = "statusMessage", default, skip_serializing_if = "Option::is_none")] pub status_message: Option<String>, #[serde(rename = "parentJobId", default, skip_serializing_if = "Option::is_none")] pub parent_job_id: Option<String>, } pub mod job_response { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Type { #[serde(rename = "unknown")] Unknown, #[serde(rename = "export")] Export, #[serde(rename = "import")] Import, #[serde(rename = "backup")] Backup, #[serde(rename = "readDeviceProperties")] ReadDeviceProperties, #[serde(rename = "writeDeviceProperties")] WriteDeviceProperties, #[serde(rename = "updateDeviceConfiguration")] UpdateDeviceConfiguration, #[serde(rename = "rebootDevice")] RebootDevice, #[serde(rename = "factoryResetDevice")] FactoryResetDevice, #[serde(rename = "firmwareUpdate")] FirmwareUpdate, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { #[serde(rename = "unknown")] Unknown, #[serde(rename = "enqueued")] Enqueued, #[serde(rename = "running")] Running, #[serde(rename = "completed")] Completed, #[serde(rename = "failed")] Failed, #[serde(rename = "cancelled")] Cancelled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IotHubDescriptionListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<IotHubDescription>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IotHubQuotaMetricInfo { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")] pub current_value: Option<i64>, #[serde(rename = "maxValue", default, skip_serializing_if = "Option::is_none")] pub max_value: Option<i64>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationInputs { pub name: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IotHubNameAvailabilityInfo { #[serde(rename = "nameAvailable", default, skip_serializing_if = "Option::is_none")] pub name_available: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub reason: Option<iot_hub_name_availability_info::Reason>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, } pub mod iot_hub_name_availability_info { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Reason { Invalid, AlreadyExists, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UserSubscriptionQuotaListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<UserSubscriptionQuota>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UserSubscriptionQuota { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub unit: Option<String>, #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")] pub current_value: Option<i32>, #[serde(default, 
skip_serializing_if = "Option::is_none")] pub limit: Option<i32>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<Name>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Name { #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option<String>, #[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")] pub localized_value: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TestAllRoutesInput { #[serde(rename = "routingSource", default, skip_serializing_if = "Option::is_none")] pub routing_source: Option<test_all_routes_input::RoutingSource>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<RoutingMessage>, #[serde(default, skip_serializing_if = "Option::is_none")] pub twin: Option<RoutingTwin>, } pub mod test_all_routes_input { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum RoutingSource { Invalid, DeviceMessages, TwinChangeEvents, DeviceLifecycleEvents, DeviceJobLifecycleEvents, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RoutingTwin { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<routing_twin::Properties>, } pub mod routing_twin { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct
{
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub desired: Option<serde_json::Value>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub reported: Option<serde_json::Value>,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoutingMessage {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub body: Option<String>,
    #[serde(rename = "appProperties", default, skip_serializing_if = "Option::is_none")]
    pub app_properties: Option<serde_json::Value>,
    #[serde(rename = "systemProperties", default, skip_serializing_if = "Option::is_none")]
    pub system_properties: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TestAllRoutesResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub routes: Vec<MatchedRoute>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MatchedRoute {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RouteProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TestRouteInput {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<RoutingMessage>,
    pub route: RouteProperties,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub twin: Option<RoutingTwin>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TestRouteResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub result: Option<test_route_result::Result>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub details: Option<TestRouteResultDetails>,
}
pub mod test_route_result {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Result {
        #[serde(rename = "undefined")]
        Undefined,
        #[serde(rename = "false")]
        False,
        #[serde(rename = "true")]
        True,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TestRouteResultDetails {
    #[serde(rename = "compilationErrors", default, skip_serializing_if = "Vec::is_empty")]
    pub compilation_errors: Vec<RouteCompilationError>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RouteCompilationError {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub severity: Option<route_compilation_error::Severity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<RouteErrorRange>,
}
pub mod route_compilation_error {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Severity {
        #[serde(rename = "error")]
        Error,
        #[serde(rename = "warning")]
        Warning,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RouteErrorRange {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub start: Option<RouteErrorPosition>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub end: Option<RouteErrorPosition>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RouteErrorPosition {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub line: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub column: Option<i32>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportDevicesRequest {
    #[serde(rename = "exportBlobContainerUri")]
    pub export_blob_container_uri: String,
    #[serde(rename = "excludeKeys")]
    pub exclude_keys: bool,
    #[serde(rename = "exportBlobName", default, skip_serializing_if = "Option::is_none")]
    pub export_blob_name: Option<String>,
    #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
    pub authentication_type: Option<export_devices_request::AuthenticationType>,
    #[serde(rename = "includeConfigurations", default, skip_serializing_if = "Option::is_none")]
    pub include_configurations: Option<bool>,
    #[serde(rename = "configurationsBlobName", default, skip_serializing_if = "Option::is_none")]
    pub configurations_blob_name: Option<String>,
}
pub mod export_devices_request {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        #[serde(rename = "keyBased")]
        KeyBased,
        #[serde(rename = "identityBased")]
        IdentityBased,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImportDevicesRequest {
    #[serde(rename = "inputBlobContainerUri")]
    pub input_blob_container_uri: String,
    #[serde(rename = "outputBlobContainerUri")]
    pub output_blob_container_uri: String,
    #[serde(rename = "inputBlobName", default, skip_serializing_if = "Option::is_none")]
    pub input_blob_name: Option<String>,
    #[serde(rename = "outputBlobName", default, skip_serializing_if = "Option::is_none")]
    pub output_blob_name: Option<String>,
    #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
    pub authentication_type: Option<import_devices_request::AuthenticationType>,
    #[serde(rename = "includeConfigurations", default, skip_serializing_if = "Option::is_none")]
    pub include_configurations: Option<bool>,
    #[serde(rename = "configurationsBlobName", default, skip_serializing_if = "Option::is_none")]
    pub configurations_blob_name: Option<String>,
}
pub mod import_devices_request {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        #[serde(rename = "keyBased")]
        KeyBased,
        #[serde(rename = "identityBased")]
        IdentityBased,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FailoverInput {
    #[serde(rename = "failoverRegion")]
    pub failover_region: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotHubLocationDescription {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub role: Option<iot_hub_location_description::Role>,
}
pub mod iot_hub_location_description {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Role {
        #[serde(rename = "primary")]
        Primary,
        #[serde(rename = "secondary")]
        Secondary,
    }
}
Properties
neobot.py
# -*- coding: utf-8 -*-
import functions
#import download_function

#Listener
def listener(messages):
    for m in messages:
        cid = m.chat.id
        if m.content_type == 'text':
            print ("[" + str(cid) + "]: " + m.text)

functions.bot.set_update_listener(listener)
#Bot starts here
print('Bot Started')
functions.bot.polling(none_stop=True)
#download_funtion.polling(none_stop=True)
#############################################
test_cps_units_checker.py
#!/usr/local/bin/python import sys # sys.path.append('/Users/jore/courses/NIMBUS/RESEARCH/CPS_TYPES/cps_units/') import unittest from detect_physical_unit_inconsistencies import CPSUnitsChecker from unit_error_types import UnitErrorTypes from unit_error import UnitError import os global_debug = False global_debug_verbose = False global_debug_AST = False class TestStringMethods(unittest.TestCase): def test_function_return_0(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False cps_unit_checker.debug_scope = False dump_file = './dump_files_for_tests/test_it_function_return_0.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) units_for_f1 = [] # TEST THAT UNITS ARE ASSIGNED TO FUNCTION for tw in cps_unit_checker.all_tree_walkers: so = tw.symbol_helper.function_dictionary['scopeObject'] if so.function: if so.function.name == 'f1': units_for_f1 = so.function.return_units self.assertEquals(units_for_f1, [{'meter': 1}], 'Incorrect units returned for function: f1 . Expected [{\'meter\':1}], received %s' % units_for_f1) # TEST THAT UNITS ARE RECEIVED TO FUNCTION actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'x' in s.var_ordered_dict: actual_units = s.var_ordered_dict['x'][12]['units'] my_oracle = [{'meter': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_function_return_1(self): ''' x SHOULD END UP M/S, but so far THERE'S NO MECHANISM FOR PASSING UNITS IN TO A FUNCTION ''' cps_unit_checker = CPSUnitsChecker() dump_file = './dump_files_for_tests/test_it_function_return_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for tw in cps_unit_checker.all_tree_walkers: so = tw.symbol_helper.function_dictionary['scopeObject'] if so.function: if so.function.name == 'f1': units_for_f1 = so.function.return_units def test_comparisons_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST= False dump_file = './dump_files_for_tests/test_it_comparisons_1.cpp.dump' source_file = './dump_files_for_tests/test_it_comparisons_1.cpp' cps_unit_checker.main_run_check(dump_file, source_file) e = cps_unit_checker.errors[0] # ORACLES token_left_units_oracle = [{'meter': 1}] token_right_units_oracle = [{'second': -1, 'meter': 1}] # ASSERTIONS self.assertEqual(e.token.str, '>') self.assertEqual(e.token_left.units, token_left_units_oracle) self.assertEqual(e.token_right.units, token_right_units_oracle) def test_logical_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_logical_1.cpp.dump' source_file = './dump_files_for_tests/test_it_logical_1.cpp' cps_unit_checker.main_run_check(dump_file, source_file) # TEST 1 e = cps_unit_checker.errors[0] # ORACLES token_right_units_oracle = [{'meter': 1}] # ASSERTIONS self.assertEqual(e.linenr, 13) self.assertEqual(e.token.str, '&&') self.assertEqual(e.token_right.units, token_right_units_oracle) # TEST 2 e = cps_unit_checker.errors[1] # ORACLES token_left_units_oracle = [{'meter': 1}] # ASSERTIONS self.assertEqual(e.linenr, 18) self.assertEqual(e.token.str, '||') self.assertEqual(e.token_left.units, token_left_units_oracle) def test_abs_0(self): cps_unit_checker = CPSUnitsChecker() 
cps_unit_checker.debug = False #cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_abs.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) def test_abs_namespace_std_0(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_abs_namespace_std.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) def test_abs_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_abs_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 't' var_linenr = 9 actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] my_oracle = [{'second': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_abs_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_abs_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 's' var_linenr =11 my_oracle = [{'meter': 1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_multiplication_assignment_in_multi_configurations_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_multiplication_assignment_in_multi_configurations.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'a_geometry_msgs_Accel.linear.x' var_linenr = 19 my_oracle = [{'second': -4, 'meter': 2}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_unit_propagation_by_multiplication_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False #cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_unit_propagation_by_multiplication_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'f' in s.var_ordered_dict: actual_units = 
s.var_ordered_dict['f'][14]['units'] my_oracle = [{'second': -4, 'meter': 2}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_unit_propagation_by_division_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False #cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_unit_propagation_by_division_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'f' in s.var_ordered_dict: actual_units = s.var_ordered_dict['f'][14]['units'] my_oracle = None self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_mulitple_units_assigned(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_multiple_units_assigned_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) expected_errors = ["test_it_multiple_units_assigned_1.cpp : 11 MULTIPLE UNITS BY ASSIGNMENT: [{'second': -1, 'meter': 1}, {'second': -2, 'meter': 2}]"] # self.assertListEqual([e['error_msg'] for e in cps_unit_checker.errors], expected_errors) # TEST QUANTITY OF ERRORS self.assertEqual(1, len(cps_unit_checker.errors)) # TEST TyPE OF ERROR self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) # TEST VALUE OF ERROR var_name = 'a_geometry_msgs_Accel.linear.x' var_linenr =11 my_oracle = [{'second': -2, 'meter': 1}, {'second': -4, 'meter': 2}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_known_functions_sqrt_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_sqrt_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'x' in s.var_ordered_dict: actual_units = s.var_ordered_dict['x'][12]['units'] my_oracle = [{'meter': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_known_functions_sqrt_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_sqrt_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'x' in s.var_ordered_dict: actual_units = s.var_ordered_dict['x'][12]['units'] my_oracle = [{'second': -1, 'meter': 1}] 
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_known_functions_sqrt_3(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_sqrt_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'x' in s.var_ordered_dict: actual_units = s.var_ordered_dict['x'][12]['units'] my_oracle = None self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_known_functions_sqrt_4(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_sqrt_4.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'x' in s.var_ordered_dict: actual_units = s.var_ordered_dict['x'][14]['units'] my_oracle = [{'meter': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_known_functions_sqrt_5(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_sqrt_5.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'x' in s.var_ordered_dict: actual_units = s.var_ordered_dict['x'][14]['units'] my_oracle = [{'meter': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_known_functions_sqrt_half_units(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_sqrt_half_units.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'x' in s.var_ordered_dict: actual_units = s.var_ordered_dict['x'][11]['units'] my_oracle = [{'meter': 0.5}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def
(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_verbose = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_atan2_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'f' in s.var_ordered_dict: actual_units = s.var_ordered_dict['f'][7]['units'] my_oracle = [{'radian': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_known_functions_atan2_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False #cps_unit_checker.debug_verbose = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_atan2_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'f' in s.var_ordered_dict: actual_units = s.var_ordered_dict['f'][8]['units'] my_oracle = [{'radian': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_toSec(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_toSec_0.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'duration' in s.var_ordered_dict: actual_units = s.var_ordered_dict['duration'][7]['units'] my_oracle = [{'second': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'second' in s.var_ordered_dict: actual_units = s.var_ordered_dict['second'][9]['units'] my_oracle = [{'second': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_float_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_float_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'f' in s.var_ordered_dict and 11 in s.var_ordered_dict['f']: actual_units = s.var_ordered_dict['f'][11]['units'] my_oracle = [{'second': -1, 'meter': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_float_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_float_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'f' in s.var_ordered_dict and 11 in s.var_ordered_dict['f']: actual_units = s.var_ordered_dict['f'][11]['units'] my_oracle = [{'second': -1, 
'meter': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_float_3(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_float_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'f' in s.var_ordered_dict: actual_units = s.var_ordered_dict['f'][12]['units'] my_oracle = [{'second': -1, 'meter': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_float_4(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_float_4.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'f' in s.var_ordered_dict: actual_units = s.var_ordered_dict['f'][13]['units'] my_oracle = [{'second': -1, 'meter': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_float_5(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_float_5.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if 'f' in s.var_ordered_dict: actual_units = s.var_ordered_dict['f'][11]['units'] my_oracle = [{'second': -1, 'meter': 1}] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_pow_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_pow_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr =10 my_oracle = [{'meter': 4}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_pow_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_pow_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr = 11 my_oracle = [{'meter': 4}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) 
def test_pow_3(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_pow_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr = 10 my_oracle = [{'meter': 4}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:%s Expected: %s received %s' % (var_name, my_oracle, actual_units)) def test_floor_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_floor_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 's' var_linenr = 8 my_oracle = [{'meter': 1, 'second':-1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_ceil_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_ceil_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 's' var_linenr = 8 my_oracle = [{'meter': 1, 'second':-1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_acos_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_acos_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr = 7 my_oracle = [{'radian': 1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_asin_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_asin_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr = 7 my_oracle = [{'radian': 1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and 
var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_atan_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_known_function_atan_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr = 7 my_oracle = [{'radian': 1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_ternary_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_ternary_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr = 9 my_oracle = [{'second': -1, 'meter': 1}, {'second': -1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_function_args_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_function_args_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # actual_units = None f = cps_unit_checker.current_configuration.functions[0].arg_units self.assertEqual(f[0][0]['linenr'], 13) self.assertEqual(f[0][0]['units'], [{'meter': 1}]) def test_function_args_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_function_args_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) for f in cps_unit_checker.current_configuration.functions[0].arg_units: self.assertEqual(f[0]['linenr'], 13) self.assertEqual(f[0]['units'], [{'meter': 1}]) def test_function_args_3(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_function_args_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) my_oracle_1 = 4 my_oracle_2 = [{'meter': 1}] actual_units = None all_units_list = cps_unit_checker.current_configuration.functions[0].arg_units self.assertEqual(len(all_units_list), my_oracle_1) for u in all_units_list: self.assertEqual(u[0]['units'], my_oracle_2) def test_function_args_4(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_function_args_4.cpp.dump' source_file = 
dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) my_oracle = [{'meter': 1}] actual_units = None for f in cps_unit_checker.current_configuration.functions: for arg_u in f.arg_units: for arg_use_on_line in arg_u: self.assertEqual(arg_use_on_line['units'], my_oracle) def test_function_args_5(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_function_args_5.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) my_oracle_1 = [{'meter': 1}] my_oracle_2 = 15 f = cps_unit_checker.current_configuration.functions[0] self.assertEqual(f.arg_units[0][0]['units'], my_oracle_1) self.assertEqual(f.arg_units[0][0]['linenr'], my_oracle_2) def test_division_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_division_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'x' var_linenr = 9 my_oracle = [{'meter': 1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_division_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_division_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'x' var_linenr = 9 my_oracle = [{'meter': 1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_division_3(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_division_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'x' var_linenr = 9 my_oracle = [{'second': 2, 'meter': 1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_division_4(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_division_4.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'x' var_linenr =10 my_oracle = [{'second': 2}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in 
s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_logical_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_logical_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) def test_error_type_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_error_return_type_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.current_file_under_analysis = dump_file cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(1, len(cps_unit_checker.errors)) def test_laser_scan_range_size_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_laser_scan_range_count_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'x' var_linenr = 7 my_oracle = None actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(0, len(cps_unit_checker.errors)) def test_laser_scan_range_size_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_laser_scan_range_count_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'x' var_linenr = 7 my_oracle = [{'meter':1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_ros_duration_isZero_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_ros_duration_isZero_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 't' var_linenr = 6 my_oracle = None actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_ros_duration_isZero_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = 
'./dump_files_for_tests/test_it_ros_duration_isZero_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 't' var_linenr = 6 my_oracle = [{'second':1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) def test_ros_header_include_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/src/test_it_header_include_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(2, len(cps_unit_checker.errors)) def test_ros_header_include_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/src/test_it_header_include_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(3, len(cps_unit_checker.errors)) # WEAKER - SOMETHING STOCASTIC IS HAPPENING e = cps_unit_checker.errors[0] self.assertEqual(7, e.linenr) self.assertEqual('./dump_files_for_tests/src/../include/test_it_header_include_2.h', e.get_file_URI_where_error_occured()) e = cps_unit_checker.errors[1] self.assertEqual(5, e.linenr) self.assertEqual('./dump_files_for_tests/src/test_it_header_include_2.cpp', e.get_file_URI_where_error_occured()) # DON'T ASSIGN UNITS TO ARRAYS WHEN array.empty() IS CALLED def test_laser_range_empty_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_range_empty_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # DON'T ASSIGN UNITS TO ARRAYS WHEN time.isZero() IS CALLED def test_ros_isZero_3(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_ros_isZero_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # DON'T ASSIGN UNITS DURING x = y = z = 0 def test_multiple_initialization_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_multiple_initialization.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # WEAKEN ASSIGNMENT WHEN MULTIPLIED BY A CONSTANT (INT) def test_it_multiplication_with_constant_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_multiplication_with_constant_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # self.assertEqual(0, len(cps_unit_checker.errors)) var_name = 'f' var_linenr = 9 my_oracle = [{'second':-1}] actual_units = None 
is_unit_propagation_based_on_constants = False for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] is_unit_propagation_based_on_constants = s.var_ordered_dict[var_name][var_linenr]['is_unit_propagation_based_on_constants'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertTrue(is_unit_propagation_based_on_constants, 'Unit inference should be weakened by constant interaction, but is still strong.') # WEAKEN ASSIGNMENT WHEN MULTIPLIED BY A CONSTANT (FLOAT) def test_it_multiplication_with_constant_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_multiplication_with_constant_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # self.assertEqual(0, len(cps_unit_checker.errors)) var_name = 'f' var_linenr = 9 my_oracle = [{'second':-1}] actual_units = None is_unit_propagation_based_on_constants = False for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] is_unit_propagation_based_on_constants = s.var_ordered_dict[var_name][var_linenr]['is_unit_propagation_based_on_constants'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertTrue(is_unit_propagation_based_on_constants, 'Unit inference should be weakened by constant interaction, but is still strong.') # WEAKEN ASSIGNMENT WHEN MULTIPLIED BY A CONSTANT (FLOAT) def test_it_operator_with_unknown_variable_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_operator_with_unknown_variable_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # self.assertEqual(0, len(cps_unit_checker.errors)) var_name = 'f' var_linenr = 10 my_oracle = [{'second':-1}] actual_units = None is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] is_unit_propagation_based_on_unknown_variable = s.var_ordered_dict[var_name][var_linenr]['is_unit_propagation_based_on_unknown_variable'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertTrue(is_unit_propagation_based_on_unknown_variable, 'Unit inference should be weakened by unknown variable interaction, but is still strong.') # WEAKEN ERROR WHEN MULTIPLIED BY A CONSTANT def test_it_operator_with_unknown_variable_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_operator_with_unknown_variable_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(2, 
len(cps_unit_checker.errors)) for e in cps_unit_checker.errors: self.assertTrue(e.is_warning, 'Should be a warning but is not marked as such') # WEAKEN ERROR WHEN MULTIPLIED BY A CONSTANT def test_it_operator_with_unknown_variable_3(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_operator_with_unknown_variable_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(2, len(cps_unit_checker.errors)) # PROPAGATION ACROSS MIN MAX def test_it_min_max_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_min_max_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr = 7 my_oracle = [{'second': -1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(0, len(cps_unit_checker.errors)) # PROPAGATION ACROSS MIN MAX def test_it_min_max_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_min_max_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr = 8 my_oracle = [{'second': -1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(0, len(cps_unit_checker.errors)) # PROPAGATION ACROSS MIN MAX def test_it_min_max_3(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_min_max_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr = 8 my_oracle = [{'second': -1}, {'second': -1, 'meter': 1}] actual_units = None for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units) self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable) self.assertFalse(cps_unit_checker.errors[0].is_warning) # PROPAGATION ACROSS MIN MAX def test_it_min_max_4(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_min_max_4.cpp.dump' source_file = 
dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr = 9 my_oracle = [{'second': -1 }, {'second': -1, 'meter': 1}] actual_units = None is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units) self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable) self.assertFalse(cps_unit_checker.errors[0].is_warning) # PROTECTION AGAINST MULTILINE def test_it_multiline_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_multiline_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'f' var_linenr = 25 my_oracle = [{'second': -1.0, 'meter': 1.0}, {'second': -2.0, 'meter': 2.0}, {'second': -3.0, 'meter': 3.0}, {'second': -2.0, 'meter': 1.0}, {'second': -3.0, 'meter': 2.0}, {'second': -4.0, 'meter': 3.0}] actual_units = None # is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) # KNOW FUNCTION quatToRPY def test_it_quatToRPY_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_quatToRPY_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'tw.linear.x' var_linenr = 17 my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian': 1.0}] actual_units = None # is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: # for v in s.var_ordered_dict: # print v if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units) # WEAK INFERENCE WARNING def test_it_weak_inference_multiplication_1 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_weak_inference_multiplication_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # for e in cps_unit_checker.errors: # print '\nweak inference: %s warning:%s ' % (e.var_name, str(e.is_warning)) var_name = 'tw.linear.x' var_linenr = 19 my_oracle = [{'second': -1.0, 'meter': 1.0}] actual_units = None # 
is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: # for v in s.var_ordered_dict: # print v if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(0, len(cps_unit_checker.errors)) # WEAK INFERENCE WARNING def test_it_weak_inference_multiplication_2 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_weak_inference_multiplication_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'tw.linear.x' var_linenr = 22 my_oracle = [{'second': -1.0, 'meter': 1.0}] actual_units = None # is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: # for v in s.var_ordered_dict: # print v if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(0, len(cps_unit_checker.errors)) # STRONG INFERENCE BECAUSE ADDITION def test_it_weak_inference_addition_1 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_weak_inference_addition_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # for e in cps_unit_checker.errors: # print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning)) var_name = 'tw.linear.x' var_linenr = 22 my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}] actual_units = None # is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: # for v in s.var_ordered_dict: # print v if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(2, len(cps_unit_checker.errors)) self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units) self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable) self.assertFalse(cps_unit_checker.errors[0].is_warning) var_name = 'tw.linear.y' var_linenr = 23 my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}] actual_units = None # is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: # for v in s.var_ordered_dict: # print v if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertTrue(cps_unit_checker.errors[1].was_assigned_mutiple_units) 
self.assertFalse(cps_unit_checker.errors[1].is_unit_propagation_based_on_unknown_variable) self.assertFalse(cps_unit_checker.errors[1].is_warning) # STRONG INFERENCE BECAUSE ADDITION - SWAPPED OPERAND ORDER def test_it_weak_inference_addition_2 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_weak_inference_addition_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # for e in cps_unit_checker.errors: # print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning)) var_name = 'tw.linear.x' var_linenr = 22 my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}] actual_units = None # is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: # for v in s.var_ordered_dict: # print v if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(2, len(cps_unit_checker.errors)) self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units) self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable) self.assertFalse(cps_unit_checker.errors[0].is_warning) var_name = 'tw.linear.y' var_linenr = 23 my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}] actual_units = None # is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: # for v in s.var_ordered_dict: # print v if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertTrue(cps_unit_checker.errors[1].was_assigned_mutiple_units) self.assertFalse(cps_unit_checker.errors[1].is_unit_propagation_based_on_unknown_variable) self.assertFalse(cps_unit_checker.errors[1].is_warning) # STRONG INFERENCE BECAUSE ADDITION - SWAPPED OPERAND ORDER def test_it_weak_inference_addition_3 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_weak_inference_addition_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # for e in cps_unit_checker.errors: # print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning)) var_name = 'tw.linear.x' var_linenr = 22 my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1.0}, {'second':1.}] actual_units = None # is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: # for v in s.var_ordered_dict: # print v if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(2, len(cps_unit_checker.errors)) self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units) 
self.assertTrue(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable) self.assertTrue(cps_unit_checker.errors[0].is_warning) # ADDITION STAND ALONE ERROR FOR ADDITION OF INCOMPATIBLE UNITS - STRONG def test_it_addition_without_assignment_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_addition_without_assignment_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # for e in cps_unit_checker.errors: # print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning)) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) # ADDITION STAND ALONE ERROR FOR ADDITION OF INCOMPATIBLE UNITS - WEAK UNKNOWN VARIABLE def test_it_addition_without_assignment_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_addition_without_assignment_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # for e in cps_unit_checker.errors: # print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning)) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertTrue(cps_unit_checker.errors[0].is_warning) # ADDITION STAND ALONE ERROR FOR ADDITION OF INCOMPATIBLE UNITS - WEAK CONSTANT def test_it_addition_without_assignment_3(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_addition_without_assignment_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertTrue(cps_unit_checker.errors[0].is_warning) # ADDITION STAND ALONE ERROR FOR SUBTRACTION OF INCOMPATIBLE UNITS - STRONG CONSTANT def test_it_addition_without_assignment_4(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_addition_without_assignment_4.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) # ADDITION OF RADIANS def test_it_radian_addition_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_radian_addition_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(2, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, 
cps_unit_checker.errors[1].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[1].is_warning) # ADDITION OF RADIANS def test_it_radian_addition_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_radian_addition_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) # MULTIPLICATION OF RADIANS def test_it_radian_multiplication_1(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_radian_multiplication_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # MULTIPLICATION OF RADIANS 2 def test_it_radian_multiplication_2(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_radian_multiplication_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) # MULTIPLICATION OF RADIANS def test_it_radian_multiplication_3(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_radian_multiplication_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # MULTIPLICATION OF RADIANS 2 def test_it_radian_multiplication_4(self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_radian_multiplication_4.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) # getXYZ def test_it_getXYZ_1 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_getXYZ_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # for e in cps_unit_checker.errors: # print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning)) self.assertEqual(0, len(cps_unit_checker.errors)) # getXYZ def test_it_getXYZ_2 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_getXYZ_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # for e in cps_unit_checker.errors: # print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning)) var_name = 'tw.linear.x' var_linenr = 10 my_oracle = 
[{'second': -1.0, 'meter': 1.0}, {'meter':1}] actual_units = None # is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: # for v in s.var_ordered_dict: # print v if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units) # getXYZ def test_it_getXYZ_3 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_getXYZ_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) var_name = 'tw.linear.x' var_linenr = 10 my_oracle = [{'second': -1.0, 'meter': 1.0}, {'quaternion':1}] actual_units = None # is_unit_propagation_based_on_unknown_variable = False for s in cps_unit_checker.current_configuration.scopes: # for v in s.var_ordered_dict: # print v if s.className == 'main': if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]: actual_units = s.var_ordered_dict[var_name][var_linenr]['units'] self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units)) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units) # getXYZ def test_it_getXYZ_4 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_getXYZ_4.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) # for e in cps_unit_checker.errors: # print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning)) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) # getXYZ def test_it_getXYZ_5 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_getXYZ_5.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) # QUATERNION ADDITION 1 def test_it_quaternion_addition_1 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_quaternion_addition_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(2, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[1].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[1].is_warning) # QUATERNION ADDITION 2 def 
test_it_quaternion_addition_2 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_quaternion_addition_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) # QUATERNION ADDITION 3 def test_it_quaternion_addition_3 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_quaternion_addition_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # QUATERNION ADDITION 4 def test_it_quaternion_addition_4 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_quaternion_addition_4.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # QUATERNION MULTIPLICATION 1 def test_it_quaternion_multiplication_1 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # QUATERNION MULTIPLICATION 2 def test_it_quaternion_multiplication_2 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # QUATERNION MULTIPLICATION 3 def test_it_quaternion_multiplication_3 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_3.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # QUATERNION MULTIPLICATION 4 def test_it_quaternion_multiplication_4 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_4.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # QUATERNION MULTIPLICATION CLOSURE def test_it_quaternion_closed_under_multiplication_1 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_quaternion_closed_under_multiplication_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # QUATERNION MULTIPLICATION CLOSURE def test_it_quaternion_closed_under_multiplication_2 (self): 
cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_quaternion_closed_under_multiplication_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) # RADIAN MULTIPLICATION CLOSURE def test_it_radian_closed_under_multiplication_1 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_radian_closed_under_multiplication_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # RADIAN MULTIPLICATION CLOSURE def test_it_radian_closed_under_multiplication_2 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_radian_closed_under_multiplication_2.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(1, len(cps_unit_checker.errors)) self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE) self.assertFalse(cps_unit_checker.errors[0].is_warning) # dt Heuristic def test_it_dt_heuristic (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_dt_heuristic_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # dt Heuristic def test_it_plus_equals_1 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_plus_equals_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # dt Heuristic def test_it_range_1 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_range_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) # same named argument in interface scope bug def test_it_scope_bug_1 (self): cps_unit_checker = CPSUnitsChecker() cps_unit_checker.debug = False cps_unit_checker.debug_print_AST = False dump_file = './dump_files_for_tests/test_it_cppcheck_scope_bug_at_argument_1.cpp.dump' source_file = dump_file.replace('.dump','') cps_unit_checker.main_run_check(dump_file, source_file) self.assertEqual(0, len(cps_unit_checker.errors)) if __name__ == '__main__': unittest.main()
test_known_functions_atan2_1
block.rs
use octword::u64x2;

use std::mem;
use std::ops::{BitXorAssign, Index, IndexMut};
use std::slice::{Iter, IterMut};

pub const ARGON2_BLOCK_BYTES: usize = 1024;

macro_rules! per_kib {
    (u8) => {
        ARGON2_BLOCK_BYTES
    };
    (u64) => {
        ARGON2_BLOCK_BYTES / 8
    };
    (u64x2) => {
        ARGON2_BLOCK_BYTES / 16
    };
}

pub struct Block([u64x2; per_kib!(u64x2)]);

impl Clone for Block {
    #[inline(always)]
    fn clone(&self) -> Self {
        let inner = self.0;
        Block(inner)
    }
}

impl Block {
    pub fn iter_mut(&mut self) -> IterMut<u64x2> {
        self.0.iter_mut()
    }

    pub fn iter(&self) -> Iter<u64x2> {
        self.0.iter()
    }

    pub fn as_u8_mut(&mut self) -> &mut [u8] {
        let rv: &mut [u8; per_kib!(u8)] = unsafe { mem::transmute(&mut self.0) };
        rv
    }

    pub fn as_u8(&self) -> &[u8] {
        let rv: &[u8; per_kib!(u8)] = unsafe { mem::transmute(&self.0) };
        rv
    }

    pub fn as_u64(&self) -> &[u64] {
        let rv: &[u64; per_kib!(u64)] = unsafe { mem::transmute(&self.0) };
        rv
    }
}

impl<'a> BitXorAssign<&'a Block> for Block {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: &Block) {
        for (d, r) in self.0.iter_mut().zip(rhs.0.iter()) {
            *d = *d ^ *r;
        }
    }
}

impl<'a, 'b> BitXorAssign<(&'a Block, &'b Block)> for Block {
    #[inline(always)]
    fn bitxor_assign(&mut self, (a, b): (&Block, &Block)) {
        for (d, (l, r)) in self.0.iter_mut().zip(a.0.iter().zip(b.0.iter())) {
            *d = *d ^ *l ^ *r;
        }
    }
}

impl Index<usize> for Block {
    type Output = u64x2;
    #[inline(always)]
    fn index(&self, idx: usize) -> &Self::Output {
        unsafe { self.0.get_unchecked(idx) }
    }
}

impl IndexMut<usize> for Block {
    #[inline(always)]
    fn index_mut(&mut self, idx: usize) -> &mut u64x2 {
        unsafe { self.0.get_unchecked_mut(idx) }
    }
}

pub fn
() -> Block {
    Block([u64x2(0, 0); per_kib!(u64x2)])
}

pub struct Matrix {
    blocks: Vec<Block>,
    lanes: u32,
    lanelen: u32,
}

impl Index<(u32, u32)> for Matrix {
    type Output = Block;
    #[inline(always)]
    fn index(&self, idx: (u32, u32)) -> &Block {
        let (row, col) = idx;
        debug_assert!(row < self.lanes && col < self.lanelen);
        unsafe {
            self.blocks
                .get_unchecked(row as usize * self.lanelen as usize + col as usize)
        }
    }
}

impl IndexMut<(u32, u32)> for Matrix {
    #[inline(always)]
    fn index_mut(&mut self, idx: (u32, u32)) -> &mut Block {
        let (row, col) = idx;
        debug_assert!(row < self.lanes && col < self.lanelen);
        unsafe {
            self.blocks
                .get_unchecked_mut(row as usize * self.lanelen as usize + col as usize)
        }
    }
}

impl Matrix {
    pub fn new(lanes: u32, lanelen: u32) -> Self {
        debug_assert!(lanes > 0 && lanelen > 0);
        Matrix {
            blocks: vec![zero(); lanelen as usize * lanes as usize],
            lanes: lanes,
            lanelen: lanelen,
        }
    }

    pub fn get3(
        &mut self,
        wr: (u32, u32),
        rd0: (u32, u32),
        rd1: (u32, u32),
    ) -> (&mut Block, &Block, &Block) {
        assert!(wr != rd0 && wr != rd1);
        let p: *mut Matrix = self;
        unsafe { (&mut (*p)[wr], &(*p)[rd0], &(*p)[rd1]) }
    }

    // Xors the Blocks of column `col` together.
    pub fn xor_column(&self, col: u32) -> Block {
        debug_assert!(col < self.lanelen);
        let mut rv = self[(0, col)].clone();
        for row in 1..self.lanes {
            rv ^= &self[(row, col)];
        }
        rv
    }

    pub fn iter(&self) -> Iter<Block> {
        self.blocks.iter()
    }
}

impl Drop for Matrix {
    fn drop(&mut self) {
        for blk in self.blocks.iter_mut() {
            *blk = zero();
        }
    }
}
zero
cap.rs
#[doc = "Register `CAP` reader"]
pub struct
(crate::R<CAP_SPEC>);
impl core::ops::Deref for R {
    type Target = crate::R<CAP_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl From<crate::R<CAP_SPEC>> for R {
    #[inline(always)]
    fn from(reader: crate::R<CAP_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Show ARM TrustZone status\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TZM_A {
    #[doc = "0: ARM TrustZone support not available"]
    NOTAVAILABLE = 0,
    #[doc = "1: ARM TrustZone support is available"]
    ENABLED = 1,
}
impl From<TZM_A> for bool {
    #[inline(always)]
    fn from(variant: TZM_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `TZM` reader - Show ARM TrustZone status"]
pub struct TZM_R(crate::FieldReader<bool, TZM_A>);
impl TZM_R {
    #[inline(always)]
    pub(crate) fn new(bits: bool) -> Self {
        TZM_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TZM_A {
        match self.bits {
            false => TZM_A::NOTAVAILABLE,
            true => TZM_A::ENABLED,
        }
    }
    #[doc = "Checks if the value of the field is `NOTAVAILABLE`"]
    #[inline(always)]
    pub fn is_not_available(&self) -> bool {
        **self == TZM_A::NOTAVAILABLE
    }
    #[doc = "Checks if the value of the field is `ENABLED`"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        **self == TZM_A::ENABLED
    }
}
impl core::ops::Deref for TZM_R {
    type Target = crate::FieldReader<bool, TZM_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl R {
    #[doc = "Bit 0 - Show ARM TrustZone status"]
    #[inline(always)]
    pub fn tzm(&self) -> TZM_R {
        TZM_R::new((self.bits & 0x01) != 0)
    }
}
#[doc = "Show implemented features for the current device\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [cap](index.html) module"]
pub struct CAP_SPEC;
impl crate::RegisterSpec for CAP_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [cap::R](R) reader structure"]
impl crate::Readable for CAP_SPEC {
    type Reader = R;
}
#[doc = "`reset()` method sets CAP to value 0x01"]
impl crate::Resettable for CAP_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0x01
    }
}
R
Checkbox.tsx
// Dependencies
import React, { FC, ComponentPropsWithoutRef, MouseEvent } from 'react'
import { cxGenerator, cx } from '@contentpi/lib'

// Types
import { Color, Shape } from '../../types'

// Styles
import {
  Checkbox,
  CheckboxWrapper,
  CheckboxChild,
  CheckboxText,
  BASE_CLASS_NAME,
} from './Checkbox.styled'

interface IProps extends ComponentPropsWithoutRef<'input'> {
  color?: Color
  checked?: boolean
  label?: string
  name?: string
  onClick?: (e: MouseEvent<HTMLElement>) => void
  value?: string
  shape?: Shape
}

const CheckboxComponent: FC<IProps> = ({
  label,
  checked,
  color = Color.primary,
  shape = Shape.regular,
  ...checkboxProps
}) => {
  const classNames = cxGenerator({
    ccn: BASE_CLASS_NAME,
    data: [color, shape],
  })

  return (
    <CheckboxWrapper data-component="Checkbox">
      <>
        <CheckboxText>{label}</CheckboxText>
        <Checkbox {...checkboxProps} type="checkbox" checked={checked} />
        <CheckboxChild className={cx(classNames, 'checkmark')} />
      </>
    </CheckboxWrapper>
  )
}

export default CheckboxComponent
interface.go
// Copyright 2018 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by informer-gen. DO NOT EDIT.

package v1

import (
	internalinterfaces "github.com/intel/MLU100-Device-Plugin/pkg/client/informers/externalversions/internalinterfaces"
)

// Interface provides access to all the informers in this group version.
type Interface interface {
	// AcceleratorFunctions returns a AcceleratorFunctionInformer.
	AcceleratorFunctions() AcceleratorFunctionInformer
	// FpgaRegions returns a FpgaRegionInformer.
	FpgaRegions() FpgaRegionInformer
}

type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func
(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// AcceleratorFunctions returns a AcceleratorFunctionInformer.
func (v *version) AcceleratorFunctions() AcceleratorFunctionInformer {
	return &acceleratorFunctionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

// FpgaRegions returns a FpgaRegionInformer.
func (v *version) FpgaRegions() FpgaRegionInformer {
	return &fpgaRegionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
New
cipher.py
from abc import *

class Cipher(metaclass=ABCMeta):
    WORDSIZE = 0
    WORDMASK = 0
    NUM_ROUNDS = 0
    @abstractmethod
    def name(self):
        pass

    @abstractmethod
    def expand_key(self, mk, num_rounds):
        pass

    @abstractmethod
    def encrypt_one_round(self, pt, rk):
        pass

    @abstractmethod
    def decrypt_one_round(self, pt, rk):
        pass

    ## Rotate left
    #  @param self object pointer
    #  @param value value to rotate
    #  @param amount rotation amount
    def rol(self, value, amount):
        return ((value << amount) | (value >> (self.WORDSIZE - amount))) & self.WORDMASK

    ## Rotate right
    #  @param self object pointer
    #  @param value value to rotate
    #  @param amount rotation amount
    def ror(self, value, amount):
        return ((value >> amount) | (value << (self.WORDSIZE - amount))) & self.WORDMASK

    ## Encrypt over multiple rounds
    #  @param self object pointer
    #  @param pt plaintext to encrypt
    #  @param rk round keys
    def encrypt(self, pt, rks):
        x, y = pt[0], pt[1]
        for rk in rks:
            x, y = self.encrypt_one_round((x, y), rk)
        return x, y

    ## Decrypt over multiple rounds
    #  @param self object pointer
    #  @param ct ciphertext to decrypt
    #  @param rk round keys
    def decrypt(self, ct, rks):
        x, y = ct[0], ct[1]
        for rk in reversed(rks):
            x, y = self.decrypt_one_round((x, y), rk)
        return x, y

    ## Check the test vector
    #  @param self object pointer
    def check_testvector(self, key, pt, ct):
        rks = self.expand_key(key, self.NUM_ROUNDS)
        enc = self.encrypt(pt, rks)
        dec = self.decrypt(ct, rks)
        if (enc == ct and dec == pt):
            print("testvector verified")
        if (enc != ct):
            print("encryption failed")
            print(' '.join(format(x, '04x') for x in ct))
            print(' '.join(format(x, '04x') for x in enc))
        if (dec != pt):
            print("decryption failed")
            print(' '.join(format(x, '04x') for x in pt))
            print(' '.join(format(x, '04x') for x in dec))
test_integration.py
from flask_testing import LiveServerTestCase
from selenium import webdriver
from urllib.request import urlopen
from flask import url_for

from application import app, db
from application.models import Players, Items

class TestBase(LiveServerTestCase):

    def create_app(self):
        app.config["SQLALCHEMY_DATABASE_URI"]="sqlite:///test.db"
        app.config["LIVESERVER_PORT"] = 5050
        app.config['SECRET_KEY'] = "secret"
        app.config["DEBUG"]= True
        app.config["TESTING"]= True
        return app

    def setUp(self):
        chrome_options = webdriver.chrome.options.Options()
        chrome_options.add_argument("--headless")
        self.driver = webdriver.Chrome(options=chrome_options)
        db.create_all()
        self.driver.get(f"http://localhost:5050")

    def tearDown(self):
        self.driver.quit()
        db.drop_all()
    def test_server_working(self):
        response = urlopen("http://localhost:5050")
        self.assertEqual(response.code, 200)

class TestViews(TestBase):

    def test_buttons_navigation(self):
        self.driver.find_element_by_xpath("/html/body/table/tbody/tr/td[2]/a").click()
        self.assertIn(url_for("showplayers"), self.driver.current_url)

    def test_entries(self):
        self.driver.find_element_by_xpath("/html/body/table/tbody/tr/td[4]/a").click()
        self.driver.find_element_by_xpath('//*[@id="player_name"]').send_keys("Player3")
        self.driver.find_element_by_xpath('//*[@id="player_class"]').send_keys("Tester")
        self.driver.find_element_by_xpath('//*[@id="level"]').send_keys("10")
        self.driver.find_element_by_xpath('//*[@id="submit"]').click()
        players = Players.query.first()
        self.assertEqual(players.player_name, "Player3")
select.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! See `README.md` for high-level documentation #![allow(dead_code)] // FIXME -- just temporarily pub use self::MethodMatchResult::*; pub use self::MethodMatchedData::*; use self::SelectionCandidate::*; use self::BuiltinBoundConditions::*; use self::EvaluationResult::*; use super::coherence; use super::DerivedObligationCause; use super::project; use super::project::{normalize_with_depth, Normalized}; use super::{PredicateObligation, TraitObligation, ObligationCause}; use super::report_overflow_error; use super::{ObligationCauseCode, BuiltinDerivedObligation, ImplDerivedObligation}; use super::{SelectionError, Unimplemented, OutputTypeParameterMismatch}; use super::Selection; use super::SelectionResult; use super::{VtableBuiltin, VtableImpl, VtableParam, VtableClosure, VtableFnPointer, VtableObject, VtableDefaultImpl}; use super::{VtableImplData, VtableObjectData, VtableBuiltinData, VtableDefaultImplData}; use super::object_safety; use super::util; use middle::fast_reject; use middle::subst::{Subst, Substs, TypeSpace, VecPerParamSpace}; use middle::ty::{self, RegionEscape, ToPolyTraitRef, Ty}; use middle::infer; use middle::infer::{InferCtxt, TypeFreshener}; use middle::ty_fold::TypeFoldable; use middle::ty_match; use middle::ty_relate::TypeRelation; use std::cell::RefCell; use std::rc::Rc; use syntax::{abi, ast}; use util::common::ErrorReported; use util::nodemap::FnvHashMap; use util::ppaux::Repr; pub struct SelectionContext<'cx, 'tcx:'cx> { infcx: &'cx InferCtxt<'cx, 'tcx>, closure_typer: &'cx (ty::ClosureTyper<'tcx>+'cx), /// Freshener used specifically for skolemizing entries on the /// obligation stack. This ensures that all entries on the stack /// at one time will have the same set of skolemized entries, /// which is important for checking for trait bounds that /// recursively require themselves. freshener: TypeFreshener<'cx, 'tcx>, /// If true, indicates that the evaluation should be conservative /// and consider the possibility of types outside this crate. /// This comes up primarily when resolving ambiguity. Imagine /// there is some trait reference `$0 : Bar` where `$0` is an /// inference variable. If `intercrate` is true, then we can never /// say for sure that this reference is not implemented, even if /// there are *no impls at all for `Bar`*, because `$0` could be /// bound to some type that in a downstream crate that implements /// `Bar`. This is the suitable mode for coherence. Elsewhere, /// though, we set this to false, because we are only interested /// in types that the user could actually have written --- in /// other words, we consider `$0 : Bar` to be unimplemented if /// there is no type that the user could *actually name* that /// would satisfy it. This avoids crippling inference, basically. intercrate: bool, } // A stack that walks back up the stack frame. struct TraitObligationStack<'prev, 'tcx: 'prev> { obligation: &'prev TraitObligation<'tcx>, /// Trait ref from `obligation` but skolemized with the /// selection-context's freshener. Used to check for recursion. 
fresh_trait_ref: ty::PolyTraitRef<'tcx>, previous: TraitObligationStackList<'prev, 'tcx>, } #[derive(Clone)] pub struct SelectionCache<'tcx> { hashmap: RefCell<FnvHashMap<Rc<ty::TraitRef<'tcx>>, SelectionResult<'tcx, SelectionCandidate<'tcx>>>>, } pub enum MethodMatchResult { MethodMatched(MethodMatchedData), MethodAmbiguous(/* list of impls that could apply */ Vec<ast::DefId>), MethodDidNotMatch, } #[derive(Copy, Clone, Debug)] pub enum MethodMatchedData { // In the case of a precise match, we don't really need to store // how the match was found. So don't. PreciseMethodMatch, // In the case of a coercion, we need to know the precise impl so // that we can determine the type to which things were coerced. CoerciveMethodMatch(/* impl we matched */ ast::DefId) } /// The selection process begins by considering all impls, where /// clauses, and so forth that might resolve an obligation. Sometimes /// we'll be able to say definitively that (e.g.) an impl does not /// apply to the obligation: perhaps it is defined for `usize` but the /// obligation is for `int`. In that case, we drop the impl out of the /// list. But the other cases are considered *candidates*. /// /// Candidates can either be definitive or ambiguous. An ambiguous /// candidate is one that might match or might not, depending on how /// type variables wind up being resolved. This only occurs during inference. /// /// For selection to succeed, there must be exactly one non-ambiguous /// candidate. Usually, it is not possible to have more than one /// definitive candidate, due to the coherence rules. However, there is /// one case where it could occur: if there is a blanket impl for a /// trait (that is, an impl applied to all T), and a type parameter /// with a where clause. In that case, we can have a candidate from the /// where clause and a second candidate from the impl. This is not a /// problem because coherence guarantees us that the impl which would /// be used to satisfy the where clause is the same one that we see /// now. To resolve this issue, therefore, we ignore impls if we find a /// matching where clause. Part of the reason for this is that where /// clauses can give additional information (like, the types of output /// parameters) that would have to be inferred from the impl. #[derive(PartialEq,Eq,Debug,Clone)] enum SelectionCandidate<'tcx> { PhantomFnCandidate, BuiltinCandidate(ty::BuiltinBound), ParamCandidate(ty::PolyTraitRef<'tcx>), ImplCandidate(ast::DefId), DefaultImplCandidate(ast::DefId), DefaultImplObjectCandidate(ast::DefId), /// This is a trait matching with a projected type as `Self`, and /// we found an applicable bound in the trait definition. ProjectionCandidate, /// Implementation of a `Fn`-family trait by one of the /// anonymous types generated for a `||` expression. ClosureCandidate(/* closure */ ast::DefId, Substs<'tcx>), /// Implementation of a `Fn`-family trait by one of the anonymous /// types generated for a fn pointer type (e.g., `fn(int)->int`) FnPointerCandidate, ObjectCandidate, BuiltinObjectCandidate, ErrorCandidate, } struct SelectionCandidateSet<'tcx> { // a list of candidates that definitely apply to the current // obligation (meaning: types unify). vec: Vec<SelectionCandidate<'tcx>>, // if this is true, then there were candidates that might or might // not have applied, but we couldn't tell. This occurs when some // of the input types are type variables, in which case there are // various "builtin" rules that might or might not trigger. 
ambiguous: bool, } enum BuiltinBoundConditions<'tcx> { If(ty::Binder<Vec<Ty<'tcx>>>), ParameterBuiltin, AmbiguousBuiltin } #[derive(Debug)] enum EvaluationResult<'tcx> { EvaluatedToOk, EvaluatedToAmbig, EvaluatedToErr(SelectionError<'tcx>), } impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>, closure_typer: &'cx ty::ClosureTyper<'tcx>) -> SelectionContext<'cx, 'tcx> { SelectionContext { infcx: infcx, closure_typer: closure_typer, freshener: infcx.freshener(), intercrate: false, } } pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>, closure_typer: &'cx ty::ClosureTyper<'tcx>) -> SelectionContext<'cx, 'tcx> { SelectionContext { infcx: infcx, closure_typer: closure_typer, freshener: infcx.freshener(), intercrate: true, } } pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> { self.infcx } pub fn tcx(&self) -> &'cx ty::ctxt<'tcx> { self.infcx.tcx } pub fn param_env(&self) -> &'cx ty::ParameterEnvironment<'cx, 'tcx> { self.closure_typer.param_env() } pub fn closure_typer(&self) -> &'cx (ty::ClosureTyper<'tcx>+'cx) { self.closure_typer } /////////////////////////////////////////////////////////////////////////// // Selection // // The selection phase tries to identify *how* an obligation will // be resolved. For example, it will identify which impl or // parameter bound is to be used. The process can be inconclusive // if the self type in the obligation is not fully inferred. Selection // can result in an error in one of two ways: // // 1. If no applicable impl or parameter bound can be found. // 2. If the output type parameters in the obligation do not match // those specified by the impl/bound. For example, if the obligation // is `Vec<Foo>:Iterable<Bar>`, but the impl specifies // `impl<T> Iterable<T> for Vec<T>`, than an error would result. /// Attempts to satisfy the obligation. If successful, this will affect the surrounding /// type environment by performing unification. pub fn select(&mut self, obligation: &TraitObligation<'tcx>) -> SelectionResult<'tcx, Selection<'tcx>> { debug!("select({})", obligation.repr(self.tcx())); assert!(!obligation.predicate.has_escaping_regions()); let stack = self.push_stack(TraitObligationStackList::empty(), obligation); match try!(self.candidate_from_obligation(&stack)) { None => { self.consider_unification_despite_ambiguity(obligation); Ok(None) } Some(candidate) => Ok(Some(try!(self.confirm_candidate(obligation, candidate)))), } } /// In the particular case of unboxed closure obligations, we can /// sometimes do some amount of unification for the /// argument/return types even though we can't yet fully match obligation. /// The particular case we are interesting in is an obligation of the form: /// /// C : FnFoo<A> /// /// where `C` is an unboxed closure type and `FnFoo` is one of the /// `Fn` traits. Because we know that users cannot write impls for closure types /// themselves, the only way that `C : FnFoo` can fail to match is under two /// conditions: /// /// 1. The closure kind for `C` is not yet known, because inference isn't complete. /// 2. The closure kind for `C` *is* known, but doesn't match what is needed. /// For example, `C` may be a `FnOnce` closure, but a `Fn` closure is needed. /// /// In either case, we always know what argument types are /// expected by `C`, no matter what kind of `Fn` trait it /// eventually matches. So we can go ahead and unify the argument /// types, even though the end result is ambiguous. 
/// /// Note that this is safe *even if* the trait would never be /// matched (case 2 above). After all, in that case, an error will /// result, so it kind of doesn't matter what we do --- unifying /// the argument types can only be helpful to the user, because /// once they patch up the kind of closure that is expected, the /// argment types won't really change. fn consider_unification_despite_ambiguity(&mut self, obligation: &TraitObligation<'tcx>) { // Is this a `C : FnFoo(...)` trait reference for some trait binding `FnFoo`? match self.tcx().lang_items.fn_trait_kind(obligation.predicate.0.def_id()) { Some(_) => { } None => { return; } } // Is the self-type a closure type? We ignore bindings here // because if it is a closure type, it must be a closure type from // within this current fn, and hence none of the higher-ranked // lifetimes can appear inside the self-type. let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); let (closure_def_id, substs) = match self_ty.sty { ty::ty_closure(id, ref substs) => (id, substs.clone()), _ => { return; } }; assert!(!substs.has_escaping_regions()); let closure_trait_ref = self.closure_trait_ref(obligation, closure_def_id, substs); match self.confirm_poly_trait_refs(obligation.cause.clone(), obligation.predicate.to_poly_trait_ref(), closure_trait_ref) { Ok(()) => { } Err(_) => { /* Silently ignore errors. */ } } } /////////////////////////////////////////////////////////////////////////// // EVALUATION // // Tests whether an obligation can be selected or whether an impl // can be applied to particular types. It skips the "confirmation" // step and hence completely ignores output type parameters. // // The result is "true" if the obligation *may* hold and "false" if // we can be sure it does not. /// Evaluates whether the obligation `obligation` can be satisfied (by any means). 
pub fn evaluate_obligation(&mut self, obligation: &PredicateObligation<'tcx>) -> bool { debug!("evaluate_obligation({})", obligation.repr(self.tcx())); self.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation) .may_apply() } fn evaluate_builtin_bound_recursively<'o>(&mut self, bound: ty::BuiltinBound, previous_stack: &TraitObligationStack<'o, 'tcx>, ty: Ty<'tcx>) -> EvaluationResult<'tcx> { let obligation = util::predicate_for_builtin_bound( self.tcx(), previous_stack.obligation.cause.clone(), bound, previous_stack.obligation.recursion_depth + 1, ty); match obligation { Ok(obligation) => { self.evaluate_predicate_recursively(previous_stack.list(), &obligation) } Err(ErrorReported) => { EvaluatedToOk } } } fn evaluate_predicates_recursively<'a,'o,I>(&mut self, stack: TraitObligationStackList<'o, 'tcx>, predicates: I) -> EvaluationResult<'tcx> where I : Iterator<Item=&'a PredicateObligation<'tcx>>, 'tcx:'a { let mut result = EvaluatedToOk; for obligation in predicates { match self.evaluate_predicate_recursively(stack, obligation) { EvaluatedToErr(e) => { return EvaluatedToErr(e); } EvaluatedToAmbig => { result = EvaluatedToAmbig; } EvaluatedToOk => { } } } result } fn evaluate_predicate_recursively<'o>(&mut self, previous_stack: TraitObligationStackList<'o, 'tcx>, obligation: &PredicateObligation<'tcx>) -> EvaluationResult<'tcx> { debug!("evaluate_predicate_recursively({})", obligation.repr(self.tcx())); match obligation.predicate { ty::Predicate::Trait(ref t) => { assert!(!t.has_escaping_regions()); let obligation = obligation.with(t.clone()); self.evaluate_obligation_recursively(previous_stack, &obligation) } ty::Predicate::Equate(ref p) => { let result = self.infcx.probe(|_| { self.infcx.equality_predicate(obligation.cause.span, p) }); match result { Ok(()) => EvaluatedToOk, Err(_) => EvaluatedToErr(Unimplemented), } } ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) => { // we do not consider region relationships when // evaluating trait matches EvaluatedToOk } ty::Predicate::Projection(ref data) => { self.infcx.probe(|_| { let project_obligation = obligation.with(data.clone()); match project::poly_project_and_unify_type(self, &project_obligation) { Ok(Some(subobligations)) => { self.evaluate_predicates_recursively(previous_stack, subobligations.iter()) } Ok(None) => { EvaluatedToAmbig } Err(_) => { EvaluatedToErr(Unimplemented) } } }) } } } fn evaluate_obligation_recursively<'o>(&mut self, previous_stack: TraitObligationStackList<'o, 'tcx>, obligation: &TraitObligation<'tcx>) -> EvaluationResult<'tcx> { debug!("evaluate_obligation_recursively({})", obligation.repr(self.tcx())); let stack = self.push_stack(previous_stack, obligation); let result = self.evaluate_stack(&stack); debug!("result: {:?}", result); result } fn evaluate_stack<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> EvaluationResult<'tcx> { // In intercrate mode, whenever any of the types are unbound, // there can always be an impl. Even if there are no impls in // this crate, perhaps the type would be unified with // something from another crate that does provide an impl. // // In intracrate mode, we must still be conservative. The reason is // that we want to avoid cycles. Imagine an impl like: // // impl<T:Eq> Eq for Vec<T> // // and a trait reference like `$0 : Eq` where `$0` is an // unbound variable. When we evaluate this trait-reference, we // will unify `$0` with `Vec<$1>` (for some fresh variable // `$1`), on the condition that `$1 : Eq`. 
We will then wind // up with many candidates (since that are other `Eq` impls // that apply) and try to winnow things down. This results in // a recursive evaluation that `$1 : Eq` -- as you can // imagine, this is just where we started. To avoid that, we // check for unbound variables and return an ambiguous (hence possible) // match if we've seen this trait before. // // This suffices to allow chains like `FnMut` implemented in // terms of `Fn` etc, but we could probably make this more // precise still. let input_types = stack.fresh_trait_ref.0.input_types(); let unbound_input_types = input_types.iter().any(|&t| ty::type_is_fresh(t)); if unbound_input_types && (self.intercrate || stack.iter().skip(1).any( |prev| self.match_fresh_trait_refs(&stack.fresh_trait_ref, &prev.fresh_trait_ref))) { debug!("evaluate_stack({}) --> unbound argument, recursion --> ambiguous", stack.fresh_trait_ref.repr(self.tcx())); return EvaluatedToAmbig; } // If there is any previous entry on the stack that precisely // matches this obligation, then we can assume that the // obligation is satisfied for now (still all other conditions // must be met of course). One obvious case this comes up is // marker traits like `Send`. Think of a linked list: // // struct List<T> { data: T, next: Option<Box<List<T>>> { // // `Box<List<T>>` will be `Send` if `T` is `Send` and // `Option<Box<List<T>>>` is `Send`, and in turn // `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is // `Send`. // // Note that we do this comparison using the `fresh_trait_ref` // fields. Because these have all been skolemized using // `self.freshener`, we can be sure that (a) this will not // affect the inferencer state and (b) that if we see two // skolemized types with the same index, they refer to the // same unbound type variable. if stack.iter() .skip(1) // skip top-most frame .any(|prev| stack.fresh_trait_ref == prev.fresh_trait_ref) { debug!("evaluate_stack({}) --> recursive", stack.fresh_trait_ref.repr(self.tcx())); return EvaluatedToOk; } match self.candidate_from_obligation(stack) { Ok(Some(c)) => self.winnow_candidate(stack, &c), Ok(None) => EvaluatedToAmbig, Err(e) => EvaluatedToErr(e), } } /// Evaluates whether the impl with id `impl_def_id` could be applied to the self type /// `obligation_self_ty`. This can be used either for trait or inherent impls. pub fn evaluate_impl(&mut self, impl_def_id: ast::DefId, obligation: &TraitObligation<'tcx>) -> bool { debug!("evaluate_impl(impl_def_id={}, obligation={})", impl_def_id.repr(self.tcx()), obligation.repr(self.tcx())); self.infcx.probe(|snapshot| { let (skol_obligation_trait_ref, skol_map) = self.infcx().skolemize_late_bound_regions(&obligation.predicate, snapshot); match self.match_impl(impl_def_id, obligation, snapshot, &skol_map, skol_obligation_trait_ref.trait_ref.clone()) { Ok(substs) => { let vtable_impl = self.vtable_impl(impl_def_id, substs, obligation.cause.clone(), obligation.recursion_depth + 1, skol_map, snapshot); self.winnow_selection(TraitObligationStackList::empty(), VtableImpl(vtable_impl)).may_apply() } Err(()) => { false } } }) } /////////////////////////////////////////////////////////////////////////// // CANDIDATE ASSEMBLY // // The selection process begins by examining all in-scope impls, // caller obligations, and so forth and assembling a list of // candidates. See `README.md` and the `Candidate` type for more // details. 
fn candidate_from_obligation<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { // Watch out for overflow. This intentionally bypasses (and does // not update) the cache. let recursion_limit = self.infcx.tcx.sess.recursion_limit.get(); if stack.obligation.recursion_depth >= recursion_limit { report_overflow_error(self.infcx(), &stack.obligation); } // Check the cache. Note that we skolemize the trait-ref // separately rather than using `stack.fresh_trait_ref` -- this // is because we want the unbound variables to be replaced // with fresh skolemized types starting from index 0. let cache_fresh_trait_pred = self.infcx.freshen(stack.obligation.predicate.clone()); debug!("candidate_from_obligation(cache_fresh_trait_pred={}, obligation={})", cache_fresh_trait_pred.repr(self.tcx()), stack.repr(self.tcx())); assert!(!stack.obligation.predicate.has_escaping_regions()); match self.check_candidate_cache(&cache_fresh_trait_pred) { Some(c) => { debug!("CACHE HIT: cache_fresh_trait_pred={}, candidate={}", cache_fresh_trait_pred.repr(self.tcx()), c.repr(self.tcx())); return c; } None => { } } // If no match, compute result and insert into cache. let candidate = self.candidate_from_obligation_no_cache(stack); if self.should_update_candidate_cache(&cache_fresh_trait_pred, &candidate) { debug!("CACHE MISS: cache_fresh_trait_pred={}, candidate={}", cache_fresh_trait_pred.repr(self.tcx()), candidate.repr(self.tcx())); self.insert_candidate_cache(cache_fresh_trait_pred, candidate.clone()); } candidate } fn candidate_from_obligation_no_cache<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { if ty::type_is_error(stack.obligation.predicate.0.self_ty()) { return Ok(Some(ErrorCandidate)); } if !self.is_knowable(stack) { debug!("intercrate not knowable"); return Ok(None); } let candidate_set = try!(self.assemble_candidates(stack)); if candidate_set.ambiguous { debug!("candidate set contains ambig"); return Ok(None); } let mut candidates = candidate_set.vec; debug!("assembled {} candidates for {}: {}", candidates.len(), stack.repr(self.tcx()), candidates.repr(self.tcx())); // At this point, we know that each of the entries in the // candidate set is *individually* applicable. Now we have to // figure out if they contain mutual incompatibilities. This // frequently arises if we have an unconstrained input type -- // for example, we are looking for $0:Eq where $0 is some // unconstrained type variable. In that case, we'll get a // candidate which assumes $0 == int, one that assumes $0 == // usize, etc. This spells an ambiguity. // If there is more than one candidate, first winnow them down // by considering extra conditions (nested obligations and so // forth). We don't winnow if there is exactly one // candidate. This is a relatively minor distinction but it // can lead to better inference and error-reporting. An // example would be if there was an impl: // // impl<T:Clone> Vec<T> { fn push_clone(...) { ... } } // // and we were to see some code `foo.push_clone()` where `boo` // is a `Vec<Bar>` and `Bar` does not implement `Clone`. If // we were to winnow, we'd wind up with zero candidates. // Instead, we select the right impl now but report `Bar does // not implement Clone`. if candidates.len() > 1 { candidates.retain(|c| self.winnow_candidate(stack, c).may_apply()) } // If there are STILL multiple candidate, we can further reduce // the list by dropping duplicates. 
if candidates.len() > 1 { let mut i = 0; while i < candidates.len() { let is_dup = (0..candidates.len()) .filter(|&j| i != j) .any(|j| self.candidate_should_be_dropped_in_favor_of(&candidates[i], &candidates[j])); if is_dup { debug!("Dropping candidate #{}/{}: {}", i, candidates.len(), candidates[i].repr(self.tcx())); candidates.swap_remove(i); } else { debug!("Retaining candidate #{}/{}: {}", i, candidates.len(), candidates[i].repr(self.tcx())); i += 1; } } } // If there are *STILL* multiple candidates, give up and // report ambiguity. if candidates.len() > 1 { debug!("multiple matches, ambig"); return Ok(None); } // If there are *NO* candidates, that there are no impls -- // that we know of, anyway. Note that in the case where there // are unbound type variables within the obligation, it might // be the case that you could still satisfy the obligation // from another crate by instantiating the type variables with // a type from another crate that does have an impl. This case // is checked for in `evaluate_stack` (and hence users // who might care about this case, like coherence, should use // that function). if candidates.len() == 0 { return Err(Unimplemented); } // Just one candidate left. let candidate = candidates.pop().unwrap(); match candidate { ImplCandidate(def_id) => { match ty::trait_impl_polarity(self.tcx(), def_id) { Some(ast::ImplPolarity::Negative) => return Err(Unimplemented), _ => {} } } _ => {} } Ok(Some(candidate)) } fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> bool { debug!("is_knowable(intercrate={})", self.intercrate); if !self.intercrate { return true; } let obligation = &stack.obligation; let predicate = self.infcx().resolve_type_vars_if_possible(&obligation.predicate); // ok to skip binder because of the nature of the // trait-ref-is-knowable check, which does not care about // bound regions let trait_ref = &predicate.skip_binder().trait_ref; coherence::trait_ref_is_knowable(self.tcx(), trait_ref) } fn pick_candidate_cache(&self) -> &SelectionCache<'tcx> { // If there are any where-clauses in scope, then we always use // a cache local to this particular scope. Otherwise, we // switch to a global cache. We used to try and draw // finer-grained distinctions, but that led to a serious of // annoying and weird bugs like #22019 and #18290. This simple // rule seems to be pretty clearly safe and also still retains // a very high hit rate (~95% when compiling rustc). if !self.param_env().caller_bounds.is_empty() { return &self.param_env().selection_cache; } // Avoid using the master cache during coherence and just rely // on the local cache. This effectively disables caching // during coherence. It is really just a simplification to // avoid us having to fear that coherence results "pollute" // the master cache. Since coherence executes pretty quickly, // it's not worth going to more trouble to increase the // hit-rate I don't think. if self.intercrate { return &self.param_env().selection_cache; } // Otherwise, we can use the global cache. 
&self.tcx().selection_cache } fn check_candidate_cache(&mut self, cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> { let cache = self.pick_candidate_cache(); let hashmap = cache.hashmap.borrow(); hashmap.get(&cache_fresh_trait_pred.0.trait_ref).cloned() } fn insert_candidate_cache(&mut self, cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>, candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>) { let cache = self.pick_candidate_cache(); let mut hashmap = cache.hashmap.borrow_mut(); hashmap.insert(cache_fresh_trait_pred.0.trait_ref.clone(), candidate); } fn should_update_candidate_cache(&mut self, cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>, candidate: &SelectionResult<'tcx, SelectionCandidate<'tcx>>) -> bool { // In general, it's a good idea to cache results, even // ambiguous ones, to save us some trouble later. But we have // to be careful not to cache results that could be // invalidated later by advances in inference. Normally, this // is not an issue, because any inference variables whose // types are not yet bound are "freshened" in the cache key, // which means that if we later get the same request once that // type variable IS bound, we'll have a different cache key. // For example, if we have `Vec<_#0t> : Foo`, and `_#0t` is // not yet known, we may cache the result as `None`. But if // later `_#0t` is bound to `Bar`, then when we freshen we'll // have `Vec<Bar> : Foo` as the cache key. // // HOWEVER, it CAN happen that we get an ambiguity result in // one particular case around closures where the cache key // would not change. That is when the precise types of the // upvars that a closure references have not yet been figured // out (i.e., because it is not yet known if they are captured // by ref, and if by ref, what kind of ref). In these cases, // when matching a builtin bound, we will yield back an // ambiguous result. But the *cache key* is just the closure type, // it doesn't capture the state of the upvar computation. // // To avoid this trap, just don't cache ambiguous results if // the self-type contains no inference byproducts (that really // shouldn't happen in other circumstances anyway, given // coherence). match *candidate { Ok(Some(_)) | Err(_) => true, Ok(None) => { cache_fresh_trait_pred.0.input_types().iter().any(|&t| ty::type_has_ty_infer(t)) } } } fn assemble_candidates<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>> { let TraitObligationStack { obligation, .. } = *stack; let mut candidates = SelectionCandidateSet { vec: Vec::new(), ambiguous: false }; // Check for the `PhantomFn` trait. This is really just a // special annotation that is *always* considered to match, no // matter what the type parameters are etc. if self.tcx().lang_items.phantom_fn() == Some(obligation.predicate.def_id()) { candidates.vec.push(PhantomFnCandidate); return Ok(candidates); } // Other bounds. Consider both in-scope bounds from fn decl // and applicable impls. There is a certain set of precedence rules here. match self.tcx().lang_items.to_builtin_kind(obligation.predicate.def_id()) { Some(ty::BoundCopy) => { debug!("obligation self ty is {}", obligation.predicate.0.self_ty().repr(self.tcx())); // User-defined copy impls are permitted, but only for // structs and enums. try!(self.assemble_candidates_from_impls(obligation, &mut candidates)); // For other types, we'll use the builtin rules. 
try!(self.assemble_builtin_bound_candidates(ty::BoundCopy, stack, &mut candidates)); } Some(bound @ ty::BoundSized) => { // Sized is never implementable by end-users, it is // always automatically computed. try!(self.assemble_builtin_bound_candidates(bound, stack, &mut candidates)); } Some(ty::BoundSend) | Some(ty::BoundSync) | None => { try!(self.assemble_closure_candidates(obligation, &mut candidates)); try!(self.assemble_fn_pointer_candidates(obligation, &mut candidates)); try!(self.assemble_candidates_from_impls(obligation, &mut candidates)); self.assemble_candidates_from_object_ty(obligation, &mut candidates); } } self.assemble_candidates_from_projected_tys(obligation, &mut candidates); try!(self.assemble_candidates_from_caller_bounds(stack, &mut candidates)); // Default implementations have lower priority, so we only // consider triggering a default if there is no other impl that can apply. if candidates.vec.len() == 0 { try!(self.assemble_candidates_from_default_impls(obligation, &mut candidates)); } debug!("candidate list size: {}", candidates.vec.len()); Ok(candidates) } fn assemble_candidates_from_projected_tys(&mut self, obligation: &TraitObligation<'tcx>, candidates: &mut SelectionCandidateSet<'tcx>) { let poly_trait_predicate = self.infcx().resolve_type_vars_if_possible(&obligation.predicate); debug!("assemble_candidates_for_projected_tys({},{})", obligation.repr(self.tcx()), poly_trait_predicate.repr(self.tcx())); // FIXME(#20297) -- just examining the self-type is very simplistic // before we go into the whole skolemization thing, just // quickly check if the self-type is a projection at all. let trait_def_id = match poly_trait_predicate.0.trait_ref.self_ty().sty { ty::ty_projection(ref data) => data.trait_ref.def_id, ty::ty_infer(ty::TyVar(_)) => { // If the self-type is an inference variable, then it MAY wind up // being a projected type, so induce an ambiguity. // // FIXME(#20297) -- being strict about this can cause // inference failures with BorrowFrom, which is // unfortunate. Can we do better here? 
debug!("assemble_candidates_for_projected_tys: ambiguous self-type"); candidates.ambiguous = true; return; } _ => { return; } }; debug!("assemble_candidates_for_projected_tys: trait_def_id={}", trait_def_id.repr(self.tcx())); let result = self.infcx.probe(|snapshot| { self.match_projection_obligation_against_bounds_from_trait(obligation, snapshot) }); if result { candidates.vec.push(ProjectionCandidate); } } fn match_projection_obligation_against_bounds_from_trait( &mut self, obligation: &TraitObligation<'tcx>, snapshot: &infer::CombinedSnapshot) -> bool { let poly_trait_predicate = self.infcx().resolve_type_vars_if_possible(&obligation.predicate); let (skol_trait_predicate, skol_map) = self.infcx().skolemize_late_bound_regions(&poly_trait_predicate, snapshot); debug!("match_projection_obligation_against_bounds_from_trait: \ skol_trait_predicate={} skol_map={}", skol_trait_predicate.repr(self.tcx()), skol_map.repr(self.tcx())); let projection_trait_ref = match skol_trait_predicate.trait_ref.self_ty().sty { ty::ty_projection(ref data) => &data.trait_ref, _ => { self.tcx().sess.span_bug( obligation.cause.span, &format!("match_projection_obligation_against_bounds_from_trait() called \ but self-ty not a projection: {}", skol_trait_predicate.trait_ref.self_ty().repr(self.tcx()))); } }; debug!("match_projection_obligation_against_bounds_from_trait: \ projection_trait_ref={}", projection_trait_ref.repr(self.tcx())); let trait_predicates = ty::lookup_predicates(self.tcx(), projection_trait_ref.def_id); let bounds = trait_predicates.instantiate(self.tcx(), projection_trait_ref.substs); debug!("match_projection_obligation_against_bounds_from_trait: \ bounds={}", bounds.repr(self.tcx())); let matching_bound = util::elaborate_predicates(self.tcx(), bounds.predicates.into_vec()) .filter_to_traits() .find( |bound| self.infcx.probe( |_| self.match_projection(obligation, bound.clone(), skol_trait_predicate.trait_ref.clone(), &skol_map, snapshot))); debug!("match_projection_obligation_against_bounds_from_trait: \ matching_bound={}", matching_bound.repr(self.tcx())); match matching_bound { None => false, Some(bound) => { // Repeat the successful match, if any, this time outside of a probe. let result = self.match_projection(obligation, bound, skol_trait_predicate.trait_ref.clone(), &skol_map, snapshot); assert!(result); true } } } fn match_projection(&mut self, obligation: &TraitObligation<'tcx>, trait_bound: ty::PolyTraitRef<'tcx>, skol_trait_ref: Rc<ty::TraitRef<'tcx>>, skol_map: &infer::SkolemizationMap, snapshot: &infer::CombinedSnapshot) -> bool { assert!(!skol_trait_ref.has_escaping_regions()); let origin = infer::RelateOutputImplTypes(obligation.cause.span); match self.infcx.sub_poly_trait_refs(false, origin, trait_bound.clone(), ty::Binder(skol_trait_ref.clone())) { Ok(()) => { } Err(_) => { return false; } } self.infcx.leak_check(skol_map, snapshot).is_ok() } /// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller /// supplied to find out whether it is listed among them. /// /// Never affects inference environment. 
fn assemble_candidates_from_caller_bounds<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>, candidates: &mut SelectionCandidateSet<'tcx>) -> Result<(),SelectionError<'tcx>> { debug!("assemble_candidates_from_caller_bounds({})", stack.obligation.repr(self.tcx())); let caller_trait_refs: Vec<_> = self.param_env().caller_bounds.iter() .filter_map(|o| o.to_opt_poly_trait_ref()) .collect(); let all_bounds = util::transitive_bounds( self.tcx(), &caller_trait_refs[..]); let matching_bounds = all_bounds.filter( |bound| self.evaluate_where_clause(stack, bound.clone()).may_apply()); let param_candidates = matching_bounds.map(|bound| ParamCandidate(bound)); candidates.vec.extend(param_candidates); Ok(()) } fn evaluate_where_clause<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>, where_clause_trait_ref: ty::PolyTraitRef<'tcx>) -> EvaluationResult<'tcx> { self.infcx().probe(move |_| { match self.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) { Ok(obligations) => { self.evaluate_predicates_recursively(stack.list(), obligations.iter()) } Err(()) => { EvaluatedToErr(Unimplemented) } } }) } /// Check for the artificial impl that the compiler will create for an obligation like `X : /// FnMut<..>` where `X` is a closure type. /// /// Note: the type parameters on a closure candidate are modeled as *output* type /// parameters and hence do not affect whether this trait is a match or not. They will be /// unified during the confirmation step. fn assemble_closure_candidates(&mut self, obligation: &TraitObligation<'tcx>, candidates: &mut SelectionCandidateSet<'tcx>) -> Result<(),SelectionError<'tcx>> { let kind = match self.tcx().lang_items.fn_trait_kind(obligation.predicate.0.def_id()) { Some(k) => k, None => { return Ok(()); } }; // ok to skip binder because the substs on closure types never // touch bound regions, they just capture the in-scope // type/region parameters let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); let (closure_def_id, substs) = match self_ty.sty { ty::ty_closure(id, ref substs) => (id, substs.clone()), ty::ty_infer(ty::TyVar(_)) => { debug!("assemble_unboxed_closure_candidates: ambiguous self-type"); candidates.ambiguous = true; return Ok(()); } _ => { return Ok(()); } }; debug!("assemble_unboxed_candidates: self_ty={} kind={:?} obligation={}", self_ty.repr(self.tcx()), kind, obligation.repr(self.tcx())); match self.closure_typer.closure_kind(closure_def_id) { Some(closure_kind) => { debug!("assemble_unboxed_candidates: closure_kind = {:?}", closure_kind); if closure_kind.extends(kind) { candidates.vec.push(ClosureCandidate(closure_def_id, substs.clone())); } } None => { debug!("assemble_unboxed_candidates: closure_kind not yet known"); candidates.ambiguous = true; } } Ok(()) } /// Implement one of the `Fn()` family for a fn pointer. fn assemble_fn_pointer_candidates(&mut self, obligation: &TraitObligation<'tcx>, candidates: &mut SelectionCandidateSet<'tcx>) -> Result<(),SelectionError<'tcx>> { // We provide impl of all fn traits for fn pointers. 
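        // For example (illustrative): a value of type `fn(u32) -> u32` can
        // satisfy a bound such as `F : Fn(u32) -> u32`, because we push a
        // `FnPointerCandidate` for suitable bare `fn` pointer types below.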
if self.tcx().lang_items.fn_trait_kind(obligation.predicate.def_id()).is_none() { return Ok(()); } // ok to skip binder because what we are inspecting doesn't involve bound regions let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); match self_ty.sty { ty::ty_infer(ty::TyVar(_)) => { debug!("assemble_fn_pointer_candidates: ambiguous self-type"); candidates.ambiguous = true; // could wind up being a fn() type } // provide an impl, but only for suitable `fn` pointers ty::ty_bare_fn(_, &ty::BareFnTy { unsafety: ast::Unsafety::Normal, abi: abi::Rust, sig: ty::Binder(ty::FnSig { inputs: _, output: ty::FnConverging(_), variadic: false }) }) => { candidates.vec.push(FnPointerCandidate); } _ => { } } Ok(()) } /// Search for impls that might apply to `obligation`. fn
assemble_candidates_from_impls
(&mut self, obligation: &TraitObligation<'tcx>, candidates: &mut SelectionCandidateSet<'tcx>) -> Result<(), SelectionError<'tcx>> { debug!("assemble_candidates_from_impls(obligation={})", obligation.repr(self.tcx())); let def_id = obligation.predicate.def_id(); let all_impls = self.all_impls(def_id); for &impl_def_id in &all_impls { self.infcx.probe(|snapshot| { let (skol_obligation_trait_pred, skol_map) = self.infcx().skolemize_late_bound_regions(&obligation.predicate, snapshot); match self.match_impl(impl_def_id, obligation, snapshot, &skol_map, skol_obligation_trait_pred.trait_ref.clone()) { Ok(_) => { candidates.vec.push(ImplCandidate(impl_def_id)); } Err(()) => { } } }); } Ok(()) } fn assemble_candidates_from_default_impls(&mut self, obligation: &TraitObligation<'tcx>, candidates: &mut SelectionCandidateSet<'tcx>) -> Result<(), SelectionError<'tcx>> { // OK to skip binder here because the tests we do below do not involve bound regions let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); debug!("assemble_candidates_from_default_impls(self_ty={})", self_ty.repr(self.tcx())); let def_id = obligation.predicate.def_id(); if ty::trait_has_default_impl(self.tcx(), def_id) { match self_ty.sty { ty::ty_trait(..) => { // For object types, we don't know what the closed // over types are. For most traits, this means we // conservatively say nothing; a candidate may be // added by `assemble_candidates_from_object_ty`. // However, for the kind of magic reflect trait, // we consider it to be implemented even for // object types, because it just lets you reflect // onto the object type, not into the object's // interior. if ty::has_attr(self.tcx(), def_id, "rustc_reflect_like") { candidates.vec.push(DefaultImplObjectCandidate(def_id)); } } ty::ty_param(..) | ty::ty_projection(..) => { // In these cases, we don't know what the actual // type is. Therefore, we cannot break it down // into its constituent types. So we don't // consider the `..` impl but instead just add no // candidates: this means that typeck will only // succeed if there is another reason to believe // that this obligation holds. That could be a // where-clause or, in the case of an object type, // it could be that the object type lists the // trait (e.g. `Foo+Send : Send`). See // `compile-fail/typeck-default-trait-impl-send-param.rs` // for an example of a test case that exercises // this path. } ty::ty_infer(ty::TyVar(_)) => { // the defaulted impl might apply, we don't know candidates.ambiguous = true; } _ => { if self.constituent_types_for_ty(self_ty).is_some() { candidates.vec.push(DefaultImplCandidate(def_id.clone())) } else { // We don't yet know what the constituent // types are. So call it ambiguous for now, // though this is a bit stronger than // necessary: that is, we know that the // defaulted impl applies, but we can't // process the confirmation step without // knowing the constituent types. (Anyway, in // the particular case of defaulted impls, it // doesn't really matter much either way, // since we won't be aiding inference by // processing the confirmation step.) candidates.ambiguous = true; } } } } Ok(()) } /// Search for impls that might apply to `obligation`. 
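    ///
    /// This variant handles the case where the self type is itself an object
    /// type. For example (illustrative, with a made-up trait), an obligation
    /// `(MyTrait + Send) : MyTrait` can be satisfied by an `ObjectCandidate`
    /// derived from the object's principal trait reference, while
    /// `(MyTrait + Send) : Send` is handled by `BuiltinObjectCandidate` below.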
fn assemble_candidates_from_object_ty(&mut self, obligation: &TraitObligation<'tcx>, candidates: &mut SelectionCandidateSet<'tcx>) { debug!("assemble_candidates_from_object_ty(self_ty={})", self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()).repr(self.tcx())); // Object-safety candidates are only applicable to object-safe // traits. Including this check is useful because it helps // inference in cases of traits like `BorrowFrom`, which are // not object-safe, and which rely on being able to infer the // self-type from one of the other inputs. Without this check, // these cases wind up being considered ambiguous due to a // (spurious) ambiguity introduced here. let predicate_trait_ref = obligation.predicate.to_poly_trait_ref(); if !object_safety::is_object_safe(self.tcx(), predicate_trait_ref.def_id()) { return; } self.infcx.commit_if_ok(|snapshot| { let bound_self_ty = self.infcx.resolve_type_vars_if_possible(&obligation.self_ty()); let (self_ty, _) = self.infcx().skolemize_late_bound_regions(&bound_self_ty, snapshot); let poly_trait_ref = match self_ty.sty { ty::ty_trait(ref data) => { match self.tcx().lang_items.to_builtin_kind(obligation.predicate.def_id()) { Some(bound @ ty::BoundSend) | Some(bound @ ty::BoundSync) => { if data.bounds.builtin_bounds.contains(&bound) { debug!("assemble_candidates_from_object_ty: matched builtin bound, \ pushing candidate"); candidates.vec.push(BuiltinObjectCandidate); return Ok(()); } } _ => {} } data.principal_trait_ref_with_self_ty(self.tcx(), self_ty) } ty::ty_infer(ty::TyVar(_)) => { debug!("assemble_candidates_from_object_ty: ambiguous"); candidates.ambiguous = true; // could wind up being an object type return Ok(()); } _ => { return Ok(()); } }; debug!("assemble_candidates_from_object_ty: poly_trait_ref={}", poly_trait_ref.repr(self.tcx())); // see whether the object trait can be upcast to the trait we are looking for let upcast_trait_refs = self.upcast(poly_trait_ref, obligation); if upcast_trait_refs.len() > 1 { // can be upcast in many ways; need more type information candidates.ambiguous = true; } else if upcast_trait_refs.len() == 1 { candidates.vec.push(ObjectCandidate); } Ok::<(),()>(()) }).unwrap(); } /////////////////////////////////////////////////////////////////////////// // WINNOW // // Winnowing is the process of attempting to resolve ambiguity by // probing further. During the winnowing process, we unify all // type variables (ignoring skolemization) and then we also // attempt to evaluate recursive bounds to see if they are // satisfied. /// Further evaluate `candidate` to decide whether all type parameters match and whether nested /// obligations are met. Returns true if `candidate` remains viable after this further /// scrutiny. 
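    ///
    /// For example (illustrative, with made-up names), a candidate
    /// `impl<T: Clone> Foo for Vec<T>` matched against an obligation
    /// `Vec<Bar> : Foo` only remains viable if the nested obligation
    /// `Bar : Clone` can itself be satisfied.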
fn winnow_candidate<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>, candidate: &SelectionCandidate<'tcx>) -> EvaluationResult<'tcx> { debug!("winnow_candidate: candidate={}", candidate.repr(self.tcx())); let result = self.infcx.probe(|_| { let candidate = (*candidate).clone(); match self.confirm_candidate(stack.obligation, candidate) { Ok(selection) => self.winnow_selection(stack.list(), selection), Err(error) => EvaluatedToErr(error), } }); debug!("winnow_candidate depth={} result={:?}", stack.obligation.recursion_depth, result); result } fn winnow_selection<'o>(&mut self, stack: TraitObligationStackList<'o,'tcx>, selection: Selection<'tcx>) -> EvaluationResult<'tcx> { self.evaluate_predicates_recursively(stack, selection.iter_nested()) } /// Returns true if `candidate_i` should be dropped in favor of /// `candidate_j`. Generally speaking we will drop duplicate /// candidates and prefer where-clause candidates. fn candidate_should_be_dropped_in_favor_of<'o>(&mut self, candidate_i: &SelectionCandidate<'tcx>, candidate_j: &SelectionCandidate<'tcx>) -> bool { if candidate_i == candidate_j { return true; } match (candidate_i, candidate_j) { (&ImplCandidate(..), &ParamCandidate(..)) | (&ClosureCandidate(..), &ParamCandidate(..)) | (&FnPointerCandidate(..), &ParamCandidate(..)) | (&BuiltinObjectCandidate(..), &ParamCandidate(_)) | (&BuiltinCandidate(..), &ParamCandidate(..)) => { // We basically prefer always prefer to use a // where-clause over another option. Where clauses // impose the burden of finding the exact match onto // the caller. Using an impl in preference of a where // clause can also lead us to "overspecialize", as in // #18453. true } (&DefaultImplCandidate(_), _) => { // Prefer other candidates over default implementations. self.tcx().sess.bug( "default implementations shouldn't be recorded \ when there are other valid candidates"); } (&ProjectionCandidate, &ParamCandidate(_)) => { // FIXME(#20297) -- this gives where clauses precedent // over projections. Really these are just two means // of deducing information (one based on the where // clauses on the trait definition; one based on those // on the enclosing scope), and it'd be better to // integrate them more intelligently. But for now this // seems ok. If we DON'T give where clauses // precedence, we run into trouble in default methods, // where both the projection bounds for `Self::A` and // the where clauses are in scope. true } _ => { false } } } /////////////////////////////////////////////////////////////////////////// // BUILTIN BOUNDS // // These cover the traits that are built-in to the language // itself. This includes `Copy` and `Sized` for sure. For the // moment, it also includes `Send` / `Sync` and a few others, but // those will hopefully change to library-defined traits in the // future. 
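    // As an illustrative example of how these conditions compose: for a tuple
    // type, `(u32, String) : Sized` holds because `u32 : Sized` and
    // `String : Sized` hold; `builtin_bound` below returns the component
    // types as nested conditions rather than consulting any impls.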
fn assemble_builtin_bound_candidates<'o>(&mut self, bound: ty::BuiltinBound, stack: &TraitObligationStack<'o, 'tcx>, candidates: &mut SelectionCandidateSet<'tcx>) -> Result<(),SelectionError<'tcx>> { match self.builtin_bound(bound, stack.obligation) { Ok(If(..)) => { debug!("builtin_bound: bound={}", bound.repr(self.tcx())); candidates.vec.push(BuiltinCandidate(bound)); Ok(()) } Ok(ParameterBuiltin) => { Ok(()) } Ok(AmbiguousBuiltin) => { debug!("assemble_builtin_bound_candidates: ambiguous builtin"); Ok(candidates.ambiguous = true) } Err(e) => { Err(e) } } } fn builtin_bound(&mut self, bound: ty::BuiltinBound, obligation: &TraitObligation<'tcx>) -> Result<BuiltinBoundConditions<'tcx>,SelectionError<'tcx>> { // Note: these tests operate on types that may contain bound // regions. To be proper, we ought to skolemize here, but we // forego the skolemization and defer it until the // confirmation step. let self_ty = self.infcx.shallow_resolve(obligation.predicate.0.self_ty()); return match self_ty.sty { ty::ty_infer(ty::IntVar(_)) | ty::ty_infer(ty::FloatVar(_)) | ty::ty_uint(_) | ty::ty_int(_) | ty::ty_bool | ty::ty_float(_) | ty::ty_bare_fn(..) | ty::ty_char => { // safe for everything ok_if(Vec::new()) } ty::ty_uniq(_) => { // Box<T> match bound { ty::BoundCopy => Err(Unimplemented), ty::BoundSized => ok_if(Vec::new()), ty::BoundSync | ty::BoundSend => { self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); } } } ty::ty_ptr(..) => { // *const T, *mut T match bound { ty::BoundCopy | ty::BoundSized => ok_if(Vec::new()), ty::BoundSync | ty::BoundSend => { self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); } } } ty::ty_trait(ref data) => { match bound { ty::BoundSized => Err(Unimplemented), ty::BoundCopy => { if data.bounds.builtin_bounds.contains(&bound) { ok_if(Vec::new()) } else { // Recursively check all supertraits to find out if any further // bounds are required and thus we must fulfill. 
let principal = data.principal_trait_ref_with_self_ty(self.tcx(), self.tcx().types.err); let desired_def_id = obligation.predicate.def_id(); for tr in util::supertraits(self.tcx(), principal) { if tr.def_id() == desired_def_id { return ok_if(Vec::new()) } } Err(Unimplemented) } } ty::BoundSync | ty::BoundSend => { self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); } } } ty::ty_rptr(_, ty::mt { ty: _, mutbl }) => { // &mut T or &T match bound { ty::BoundCopy => { match mutbl { // &mut T is affine and hence never `Copy` ast::MutMutable => Err(Unimplemented), // &T is always copyable ast::MutImmutable => ok_if(Vec::new()), } } ty::BoundSized => ok_if(Vec::new()), ty::BoundSync | ty::BoundSend => { self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); } } } ty::ty_vec(element_ty, ref len) => { // [T, ..n] and [T] match bound { ty::BoundCopy => { match *len { // [T, ..n] is copy iff T is copy Some(_) => ok_if(vec![element_ty]), // [T] is unsized and hence affine None => Err(Unimplemented), } } ty::BoundSized => { if len.is_some() { ok_if(Vec::new()) } else { Err(Unimplemented) } } ty::BoundSync | ty::BoundSend => { self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); } } } ty::ty_str => { // Equivalent to [u8] match bound { ty::BoundSync | ty::BoundSend => { self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); } ty::BoundCopy | ty::BoundSized => Err(Unimplemented), } } // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet ty::ty_tup(ref tys) => ok_if(tys.clone()), ty::ty_closure(def_id, substs) => { // FIXME -- This case is tricky. In the case of by-ref // closures particularly, we need the results of // inference to decide how to reflect the type of each // upvar (the upvar may have type `T`, but the runtime // type could be `&mut`, `&`, or just `T`). For now, // though, we'll do this unsoundly and assume that all // captures are by value. Really what we ought to do // is reserve judgement and then intertwine this // analysis with closure inference. assert_eq!(def_id.krate, ast::LOCAL_CRATE); // Unboxed closures shouldn't be // implicitly copyable if bound == ty::BoundCopy { return Ok(ParameterBuiltin); } // Upvars are always local variables or references to // local variables, and local variables cannot be // unsized, so the closure struct as a whole must be // Sized. if bound == ty::BoundSized { return ok_if(Vec::new()); } match self.closure_typer.closure_upvars(def_id, substs) { Some(upvars) => ok_if(upvars.iter().map(|c| c.ty).collect()), None => { debug!("assemble_builtin_bound_candidates: no upvar types available yet"); Ok(AmbiguousBuiltin) } } } ty::ty_struct(def_id, substs) => { let types: Vec<Ty> = ty::struct_fields(self.tcx(), def_id, substs).iter() .map(|f| f.mt.ty) .collect(); nominal(bound, types) } ty::ty_enum(def_id, substs) => { let types: Vec<Ty> = ty::substd_enum_variants(self.tcx(), def_id, substs) .iter() .flat_map(|variant| variant.args.iter()) .cloned() .collect(); nominal(bound, types) } ty::ty_projection(_) | ty::ty_param(_) => { // Note: A type parameter is only considered to meet a // particular bound if there is a where clause telling // us that it does, and that case is handled by // `assemble_candidates_from_caller_bounds()`. Ok(ParameterBuiltin) } ty::ty_infer(ty::TyVar(_)) => { // Unbound type variable. Might or might not have // applicable impls and so forth, depending on what // those type variables wind up being bound to. 
debug!("assemble_builtin_bound_candidates: ambiguous builtin"); Ok(AmbiguousBuiltin) } ty::ty_err => ok_if(Vec::new()), ty::ty_infer(ty::FreshTy(_)) | ty::ty_infer(ty::FreshIntTy(_)) => { self.tcx().sess.bug( &format!( "asked to assemble builtin bounds of unexpected type: {}", self_ty.repr(self.tcx()))); } }; fn ok_if<'tcx>(v: Vec<Ty<'tcx>>) -> Result<BuiltinBoundConditions<'tcx>, SelectionError<'tcx>> { Ok(If(ty::Binder(v))) } fn nominal<'cx, 'tcx>(bound: ty::BuiltinBound, types: Vec<Ty<'tcx>>) -> Result<BuiltinBoundConditions<'tcx>, SelectionError<'tcx>> { // First check for markers and other nonsense. match bound { // Fallback to whatever user-defined impls exist in this case. ty::BoundCopy => Ok(ParameterBuiltin), // Sized if all the component types are sized. ty::BoundSized => ok_if(types), // Shouldn't be coming through here. ty::BoundSend | ty::BoundSync => unreachable!(), } } } /// For default impls, we need to break apart a type into its /// "constituent types" -- meaning, the types that it contains. /// /// Here are some (simple) examples: /// /// ``` /// (i32, u32) -> [i32, u32] /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32] /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32] /// Zed<i32> where enum Zed { A(T), B(u32) } -> [i32, u32] /// ``` fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Option<Vec<Ty<'tcx>>> { match t.sty { ty::ty_uint(_) | ty::ty_int(_) | ty::ty_bool | ty::ty_float(_) | ty::ty_bare_fn(..) | ty::ty_str | ty::ty_err | ty::ty_infer(ty::IntVar(_)) | ty::ty_infer(ty::FloatVar(_)) | ty::ty_char => { Some(Vec::new()) } ty::ty_trait(..) | ty::ty_param(..) | ty::ty_projection(..) | ty::ty_infer(ty::TyVar(_)) | ty::ty_infer(ty::FreshTy(_)) | ty::ty_infer(ty::FreshIntTy(_)) => { self.tcx().sess.bug( &format!( "asked to assemble constituent types of unexpected type: {}", t.repr(self.tcx()))); } ty::ty_uniq(referent_ty) => { // Box<T> Some(vec![referent_ty]) } ty::ty_ptr(ty::mt { ty: element_ty, ..}) | ty::ty_rptr(_, ty::mt { ty: element_ty, ..}) => { Some(vec![element_ty]) }, ty::ty_vec(element_ty, _) => { Some(vec![element_ty]) } ty::ty_tup(ref tys) => { // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet Some(tys.clone()) } ty::ty_closure(def_id, substs) => { assert_eq!(def_id.krate, ast::LOCAL_CRATE); match self.closure_typer.closure_upvars(def_id, substs) { Some(upvars) => { Some(upvars.iter().map(|c| c.ty).collect()) } None => { None } } } // for `PhantomData<T>`, we pass `T` ty::ty_struct(def_id, substs) if Some(def_id) == self.tcx().lang_items.phantom_data() => { Some(substs.types.get_slice(TypeSpace).to_vec()) } ty::ty_struct(def_id, substs) => { Some(ty::struct_fields(self.tcx(), def_id, substs).iter() .map(|f| f.mt.ty) .collect()) } ty::ty_enum(def_id, substs) => { Some(ty::substd_enum_variants(self.tcx(), def_id, substs) .iter() .flat_map(|variant| variant.args.iter()) .map(|&ty| ty) .collect()) } } } fn collect_predicates_for_types(&mut self, obligation: &TraitObligation<'tcx>, trait_def_id: ast::DefId, types: ty::Binder<Vec<Ty<'tcx>>>) -> Vec<PredicateObligation<'tcx>> { let derived_cause = match self.tcx().lang_items.to_builtin_kind(trait_def_id) { Some(_) => { self.derived_cause(obligation, BuiltinDerivedObligation) }, None => { self.derived_cause(obligation, ImplDerivedObligation) } }; // Because the types were potentially derived from // higher-ranked obligations they may reference late-bound // regions. For example, `for<'a> Foo<&'a int> : Copy` would // yield a type like `for<'a> &'a int`. 
In general, we // maintain the invariant that we never manipulate bound // regions, so we have to process these bound regions somehow. // // The strategy is to: // // 1. Instantiate those regions to skolemized regions (e.g., // `for<'a> &'a int` becomes `&0 int`. // 2. Produce something like `&'0 int : Copy` // 3. Re-bind the regions back to `for<'a> &'a int : Copy` // Move the binder into the individual types let bound_types: Vec<ty::Binder<Ty<'tcx>>> = types.skip_binder() .iter() .map(|&nested_ty| ty::Binder(nested_ty)) .collect(); // For each type, produce a vector of resulting obligations let obligations: Result<Vec<Vec<_>>, _> = bound_types.iter().map(|nested_ty| { self.infcx.commit_if_ok(|snapshot| { let (skol_ty, skol_map) = self.infcx().skolemize_late_bound_regions(nested_ty, snapshot); let Normalized { value: normalized_ty, mut obligations } = project::normalize_with_depth(self, obligation.cause.clone(), obligation.recursion_depth + 1, &skol_ty); let skol_obligation = try!(util::predicate_for_trait_def(self.tcx(), derived_cause.clone(), trait_def_id, obligation.recursion_depth + 1, normalized_ty)); obligations.push(skol_obligation); Ok(self.infcx().plug_leaks(skol_map, snapshot, &obligations)) }) }).collect(); // Flatten those vectors (couldn't do it above due `collect`) match obligations { Ok(obligations) => obligations.into_iter().flat_map(|o| o.into_iter()).collect(), Err(ErrorReported) => Vec::new(), } } /////////////////////////////////////////////////////////////////////////// // CONFIRMATION // // Confirmation unifies the output type parameters of the trait // with the values found in the obligation, possibly yielding a // type error. See `README.md` for more details. fn confirm_candidate(&mut self, obligation: &TraitObligation<'tcx>, candidate: SelectionCandidate<'tcx>) -> Result<Selection<'tcx>,SelectionError<'tcx>> { debug!("confirm_candidate({}, {})", obligation.repr(self.tcx()), candidate.repr(self.tcx())); match candidate { BuiltinCandidate(builtin_bound) => { Ok(VtableBuiltin( try!(self.confirm_builtin_candidate(obligation, builtin_bound)))) } PhantomFnCandidate | ErrorCandidate => { Ok(VtableBuiltin(VtableBuiltinData { nested: VecPerParamSpace::empty() })) } ParamCandidate(param) => { let obligations = self.confirm_param_candidate(obligation, param); Ok(VtableParam(obligations)) } DefaultImplCandidate(trait_def_id) => { let data = self.confirm_default_impl_candidate(obligation, trait_def_id); Ok(VtableDefaultImpl(data)) } DefaultImplObjectCandidate(trait_def_id) => { let data = self.confirm_default_impl_object_candidate(obligation, trait_def_id); Ok(VtableDefaultImpl(data)) } ImplCandidate(impl_def_id) => { let vtable_impl = try!(self.confirm_impl_candidate(obligation, impl_def_id)); Ok(VtableImpl(vtable_impl)) } ClosureCandidate(closure_def_id, substs) => { try!(self.confirm_closure_candidate(obligation, closure_def_id, &substs)); Ok(VtableClosure(closure_def_id, substs)) } BuiltinObjectCandidate => { // This indicates something like `(Trait+Send) : // Send`. In this case, we know that this holds // because that's what the object type is telling us, // and there's really no additional obligations to // prove and no types in particular to unify etc. 
Ok(VtableParam(Vec::new())) } ObjectCandidate => { let data = self.confirm_object_candidate(obligation); Ok(VtableObject(data)) } FnPointerCandidate => { let fn_type = try!(self.confirm_fn_pointer_candidate(obligation)); Ok(VtableFnPointer(fn_type)) } ProjectionCandidate => { self.confirm_projection_candidate(obligation); Ok(VtableParam(Vec::new())) } } } fn confirm_projection_candidate(&mut self, obligation: &TraitObligation<'tcx>) { let _: Result<(),()> = self.infcx.commit_if_ok(|snapshot| { let result = self.match_projection_obligation_against_bounds_from_trait(obligation, snapshot); assert!(result); Ok(()) }); } fn confirm_param_candidate(&mut self, obligation: &TraitObligation<'tcx>, param: ty::PolyTraitRef<'tcx>) -> Vec<PredicateObligation<'tcx>> { debug!("confirm_param_candidate({},{})", obligation.repr(self.tcx()), param.repr(self.tcx())); // During evaluation, we already checked that this // where-clause trait-ref could be unified with the obligation // trait-ref. Repeat that unification now without any // transactional boundary; it should not fail. match self.match_where_clause_trait_ref(obligation, param.clone()) { Ok(obligations) => obligations, Err(()) => { self.tcx().sess.bug( &format!("Where clause `{}` was applicable to `{}` but now is not", param.repr(self.tcx()), obligation.repr(self.tcx()))); } } } fn confirm_builtin_candidate(&mut self, obligation: &TraitObligation<'tcx>, bound: ty::BuiltinBound) -> Result<VtableBuiltinData<PredicateObligation<'tcx>>, SelectionError<'tcx>> { debug!("confirm_builtin_candidate({})", obligation.repr(self.tcx())); match try!(self.builtin_bound(bound, obligation)) { If(nested) => Ok(self.vtable_builtin_data(obligation, bound, nested)), AmbiguousBuiltin | ParameterBuiltin => { self.tcx().sess.span_bug( obligation.cause.span, &format!("builtin bound for {} was ambig", obligation.repr(self.tcx()))); } } } fn vtable_builtin_data(&mut self, obligation: &TraitObligation<'tcx>, bound: ty::BuiltinBound, nested: ty::Binder<Vec<Ty<'tcx>>>) -> VtableBuiltinData<PredicateObligation<'tcx>> { let trait_def = match self.tcx().lang_items.from_builtin_kind(bound) { Ok(def_id) => def_id, Err(_) => { self.tcx().sess.bug("builtin trait definition not found"); } }; let obligations = self.collect_predicates_for_types(obligation, trait_def, nested); let obligations = VecPerParamSpace::new(obligations, Vec::new(), Vec::new()); debug!("vtable_builtin_data: obligations={}", obligations.repr(self.tcx())); VtableBuiltinData { nested: obligations } } /// This handles the case where a `impl Foo for ..` impl is being used. /// The idea is that the impl applies to `X : Foo` if the following conditions are met: /// /// 1. For each constituent type `Y` in `X`, `Y : Foo` holds /// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds. 
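    ///
    /// For example (illustrative, with made-up names): given a default impl
    /// `impl Foo for .. {}` and `struct MyStruct { a: u32, b: String }`, the
    /// obligation `MyStruct : Foo` holds if `u32 : Foo` and `String : Foo`
    /// hold (condition 1) and any where-clauses declared on `Foo` hold with
    /// `Self = MyStruct` (condition 2).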
fn confirm_default_impl_candidate(&mut self, obligation: &TraitObligation<'tcx>, trait_def_id: ast::DefId) -> VtableDefaultImplData<PredicateObligation<'tcx>> { debug!("confirm_default_impl_candidate({}, {})", obligation.repr(self.tcx()), trait_def_id.repr(self.tcx())); // binder is moved below let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty()); match self.constituent_types_for_ty(self_ty) { Some(types) => self.vtable_default_impl(obligation, trait_def_id, ty::Binder(types)), None => { self.tcx().sess.bug( &format!( "asked to confirm default implementation for ambiguous type: {}", self_ty.repr(self.tcx()))); } } } fn confirm_default_impl_object_candidate(&mut self, obligation: &TraitObligation<'tcx>, trait_def_id: ast::DefId) -> VtableDefaultImplData<PredicateObligation<'tcx>> { debug!("confirm_default_impl_object_candidate({}, {})", obligation.repr(self.tcx()), trait_def_id.repr(self.tcx())); assert!(ty::has_attr(self.tcx(), trait_def_id, "rustc_reflect_like")); // OK to skip binder, it is reintroduced below let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty()); match self_ty.sty { ty::ty_trait(ref data) => { // OK to skip the binder, it is reintroduced below let input_types = data.principal.skip_binder().substs.types.get_slice(TypeSpace); let assoc_types = data.bounds.projection_bounds .iter() .map(|pb| pb.skip_binder().ty); let all_types: Vec<_> = input_types.iter().cloned() .chain(assoc_types) .collect(); // reintroduce the two binding levels we skipped, then flatten into one let all_types = ty::Binder(ty::Binder(all_types)); let all_types = ty::flatten_late_bound_regions(self.tcx(), &all_types); self.vtable_default_impl(obligation, trait_def_id, all_types) } _ => { self.tcx().sess.bug( &format!( "asked to confirm default object implementation for non-object type: {}", self_ty.repr(self.tcx()))); } } } /// See `confirm_default_impl_candidate` fn vtable_default_impl(&mut self, obligation: &TraitObligation<'tcx>, trait_def_id: ast::DefId, nested: ty::Binder<Vec<Ty<'tcx>>>) -> VtableDefaultImplData<PredicateObligation<'tcx>> { debug!("vtable_default_impl_data: nested={}", nested.repr(self.tcx())); let mut obligations = self.collect_predicates_for_types(obligation, trait_def_id, nested); let trait_obligations: Result<VecPerParamSpace<_>,()> = self.infcx.commit_if_ok(|snapshot| { let poly_trait_ref = obligation.predicate.to_poly_trait_ref(); let (trait_ref, skol_map) = self.infcx().skolemize_late_bound_regions(&poly_trait_ref, snapshot); Ok(self.impl_or_trait_obligations(obligation.cause.clone(), obligation.recursion_depth + 1, trait_def_id, &trait_ref.substs, skol_map, snapshot)) }); obligations.extend(trait_obligations.unwrap().into_iter()); // no Errors in that code above debug!("vtable_default_impl_data: obligations={}", obligations.repr(self.tcx())); VtableDefaultImplData { trait_def_id: trait_def_id, nested: obligations } } fn confirm_impl_candidate(&mut self, obligation: &TraitObligation<'tcx>, impl_def_id: ast::DefId) -> Result<VtableImplData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> { debug!("confirm_impl_candidate({},{})", obligation.repr(self.tcx()), impl_def_id.repr(self.tcx())); // First, create the substitutions by matching the impl again, // this time not in a probe. 
self.infcx.commit_if_ok(|snapshot| { let (skol_obligation_trait_ref, skol_map) = self.infcx().skolemize_late_bound_regions(&obligation.predicate, snapshot); let substs = self.rematch_impl(impl_def_id, obligation, snapshot, &skol_map, skol_obligation_trait_ref.trait_ref); debug!("confirm_impl_candidate substs={}", substs.repr(self.tcx())); Ok(self.vtable_impl(impl_def_id, substs, obligation.cause.clone(), obligation.recursion_depth + 1, skol_map, snapshot)) }) } fn vtable_impl(&mut self, impl_def_id: ast::DefId, substs: Normalized<'tcx, Substs<'tcx>>, cause: ObligationCause<'tcx>, recursion_depth: usize, skol_map: infer::SkolemizationMap, snapshot: &infer::CombinedSnapshot) -> VtableImplData<'tcx, PredicateObligation<'tcx>> { debug!("vtable_impl(impl_def_id={}, substs={}, recursion_depth={}, skol_map={})", impl_def_id.repr(self.tcx()), substs.repr(self.tcx()), recursion_depth, skol_map.repr(self.tcx())); let mut impl_obligations = self.impl_or_trait_obligations(cause, recursion_depth, impl_def_id, &substs.value, skol_map, snapshot); debug!("vtable_impl: impl_def_id={} impl_obligations={}", impl_def_id.repr(self.tcx()), impl_obligations.repr(self.tcx())); impl_obligations.extend(TypeSpace, substs.obligations.into_iter()); VtableImplData { impl_def_id: impl_def_id, substs: substs.value, nested: impl_obligations } } fn confirm_object_candidate(&mut self, obligation: &TraitObligation<'tcx>) -> VtableObjectData<'tcx> { debug!("confirm_object_candidate({})", obligation.repr(self.tcx())); // FIXME skipping binder here seems wrong -- we should // probably flatten the binder from the obligation and the // binder from the object. Have to try to make a broken test // case that results. -nmatsakis let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); let poly_trait_ref = match self_ty.sty { ty::ty_trait(ref data) => { data.principal_trait_ref_with_self_ty(self.tcx(), self_ty) } _ => { self.tcx().sess.span_bug(obligation.cause.span, "object candidate with non-object"); } }; // Upcast the object type to the obligation type. There must // be exactly one applicable trait-reference; if this were not // the case, we would have reported an ambiguity error rather // than successfully selecting one of the candidates. 
let upcast_trait_refs = self.upcast(poly_trait_ref.clone(), obligation); assert_eq!(upcast_trait_refs.len(), 1); let upcast_trait_ref = upcast_trait_refs.into_iter().next().unwrap(); match self.match_poly_trait_ref(obligation, upcast_trait_ref.clone()) { Ok(()) => { } Err(()) => { self.tcx().sess.span_bug(obligation.cause.span, "failed to match trait refs"); } } VtableObjectData { object_ty: self_ty, upcast_trait_ref: upcast_trait_ref } } fn confirm_fn_pointer_candidate(&mut self, obligation: &TraitObligation<'tcx>) -> Result<ty::Ty<'tcx>,SelectionError<'tcx>> { debug!("confirm_fn_pointer_candidate({})", obligation.repr(self.tcx())); // ok to skip binder; it is reintroduced below let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); let sig = ty::ty_fn_sig(self_ty); let trait_ref = util::closure_trait_ref_and_return_type(self.tcx(), obligation.predicate.def_id(), self_ty, sig, util::TupleArgumentsFlag::Yes) .map_bound(|(trait_ref, _)| trait_ref); try!(self.confirm_poly_trait_refs(obligation.cause.clone(), obligation.predicate.to_poly_trait_ref(), trait_ref)); Ok(self_ty) } fn confirm_closure_candidate(&mut self, obligation: &TraitObligation<'tcx>, closure_def_id: ast::DefId, substs: &Substs<'tcx>) -> Result<(),SelectionError<'tcx>> { debug!("confirm_closure_candidate({},{},{})", obligation.repr(self.tcx()), closure_def_id.repr(self.tcx()), substs.repr(self.tcx())); let trait_ref = self.closure_trait_ref(obligation, closure_def_id, substs); debug!("confirm_closure_candidate(closure_def_id={}, trait_ref={})", closure_def_id.repr(self.tcx()), trait_ref.repr(self.tcx())); self.confirm_poly_trait_refs(obligation.cause.clone(), obligation.predicate.to_poly_trait_ref(), trait_ref) } /// In the case of closure types and fn pointers, /// we currently treat the input type parameters on the trait as /// outputs. This means that when we have a match we have only /// considered the self type, so we have to go back and make sure /// to relate the argument types too. This is kind of wrong, but /// since we control the full set of impls, also not that wrong, /// and it DOES yield better error messages (since we don't report /// errors as if there is no applicable impl, but rather report /// errors are about mismatched argument types. /// /// Here is an example. Imagine we have an closure expression /// and we desugared it so that the type of the expression is /// `Closure`, and `Closure` expects an int as argument. Then it /// is "as if" the compiler generated this impl: /// /// impl Fn(int) for Closure { ... } /// /// Now imagine our obligation is `Fn(usize) for Closure`. So far /// we have matched the self-type `Closure`. At this point we'll /// compare the `int` to `usize` and generate an error. /// /// Note that this checking occurs *after* the impl has selected, /// because these output type parameters should not affect the /// selection of the impl. Therefore, if there is a mismatch, we /// report an error to the user. 
fn confirm_poly_trait_refs(&mut self, obligation_cause: ObligationCause, obligation_trait_ref: ty::PolyTraitRef<'tcx>, expected_trait_ref: ty::PolyTraitRef<'tcx>) -> Result<(), SelectionError<'tcx>> { let origin = infer::RelateOutputImplTypes(obligation_cause.span); let obligation_trait_ref = obligation_trait_ref.clone(); match self.infcx.sub_poly_trait_refs(false, origin, expected_trait_ref.clone(), obligation_trait_ref.clone()) { Ok(()) => Ok(()), Err(e) => Err(OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e)) } } /////////////////////////////////////////////////////////////////////////// // Matching // // Matching is a common path used for both evaluation and // confirmation. It basically unifies types that appear in impls // and traits. This does affect the surrounding environment; // therefore, when used during evaluation, match routines must be // run inside of a `probe()` so that their side-effects are // contained. fn rematch_impl(&mut self, impl_def_id: ast::DefId, obligation: &TraitObligation<'tcx>, snapshot: &infer::CombinedSnapshot, skol_map: &infer::SkolemizationMap, skol_obligation_trait_ref: Rc<ty::TraitRef<'tcx>>) -> Normalized<'tcx, Substs<'tcx>> { match self.match_impl(impl_def_id, obligation, snapshot, skol_map, skol_obligation_trait_ref) { Ok(substs) => substs, Err(()) => { self.tcx().sess.bug( &format!("Impl {} was matchable against {} but now is not", impl_def_id.repr(self.tcx()), obligation.repr(self.tcx()))); } } } fn match_impl(&mut self, impl_def_id: ast::DefId, obligation: &TraitObligation<'tcx>, snapshot: &infer::CombinedSnapshot, skol_map: &infer::SkolemizationMap, skol_obligation_trait_ref: Rc<ty::TraitRef<'tcx>>) -> Result<Normalized<'tcx, Substs<'tcx>>, ()> { let impl_trait_ref = ty::impl_trait_ref(self.tcx(), impl_def_id).unwrap(); // Before we create the substitutions and everything, first // consider a "quick reject". This avoids creating more types // and so forth that we need to. if self.fast_reject_trait_refs(obligation, &*impl_trait_ref) { return Err(()); } let impl_substs = util::fresh_type_vars_for_impl(self.infcx, obligation.cause.span, impl_def_id); let impl_trait_ref = impl_trait_ref.subst(self.tcx(), &impl_substs); let impl_trait_ref = project::normalize_with_depth(self, obligation.cause.clone(), obligation.recursion_depth + 1, &impl_trait_ref); debug!("match_impl(impl_def_id={}, obligation={}, \ impl_trait_ref={}, skol_obligation_trait_ref={})", impl_def_id.repr(self.tcx()), obligation.repr(self.tcx()), impl_trait_ref.repr(self.tcx()), skol_obligation_trait_ref.repr(self.tcx())); let origin = infer::RelateOutputImplTypes(obligation.cause.span); if let Err(e) = self.infcx.sub_trait_refs(false, origin, impl_trait_ref.value.clone(), skol_obligation_trait_ref) { debug!("match_impl: failed sub_trait_refs due to `{}`", ty::type_err_to_str(self.tcx(), &e)); return Err(()); } if let Err(e) = self.infcx.leak_check(skol_map, snapshot) { debug!("match_impl: failed leak check due to `{}`", ty::type_err_to_str(self.tcx(), &e)); return Err(()); } debug!("match_impl: success impl_substs={}", impl_substs.repr(self.tcx())); Ok(Normalized { value: impl_substs, obligations: impl_trait_ref.obligations }) } fn fast_reject_trait_refs(&mut self, obligation: &TraitObligation, impl_trait_ref: &ty::TraitRef) -> bool { // We can avoid creating type variables and doing the full // substitution if we find that any of the input types, when // simplified, do not match. 
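        // For example (illustrative): an obligation `Vec<u32> : Foo` can be
        // rejected against `impl<K, V> Foo for HashMap<K, V>` without any
        // unification, because the simplified self types (`Vec` vs `HashMap`)
        // already differ. (`Foo` is a made-up trait name.)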
obligation.predicate.0.input_types().iter() .zip(impl_trait_ref.input_types().iter()) .any(|(&obligation_ty, &impl_ty)| { let simplified_obligation_ty = fast_reject::simplify_type(self.tcx(), obligation_ty, true); let simplified_impl_ty = fast_reject::simplify_type(self.tcx(), impl_ty, false); simplified_obligation_ty.is_some() && simplified_impl_ty.is_some() && simplified_obligation_ty != simplified_impl_ty }) } /// Normalize `where_clause_trait_ref` and try to match it against /// `obligation`. If successful, return any predicates that /// result from the normalization. Normalization is necessary /// because where-clauses are stored in the parameter environment /// unnormalized. fn match_where_clause_trait_ref(&mut self, obligation: &TraitObligation<'tcx>, where_clause_trait_ref: ty::PolyTraitRef<'tcx>) -> Result<Vec<PredicateObligation<'tcx>>,()> { try!(self.match_poly_trait_ref(obligation, where_clause_trait_ref)); Ok(Vec::new()) } /// Returns `Ok` if `poly_trait_ref` being true implies that the /// obligation is satisfied. fn match_poly_trait_ref(&mut self, obligation: &TraitObligation<'tcx>, poly_trait_ref: ty::PolyTraitRef<'tcx>) -> Result<(),()> { debug!("match_poly_trait_ref: obligation={} poly_trait_ref={}", obligation.repr(self.tcx()), poly_trait_ref.repr(self.tcx())); let origin = infer::RelateOutputImplTypes(obligation.cause.span); match self.infcx.sub_poly_trait_refs(false, origin, poly_trait_ref, obligation.predicate.to_poly_trait_ref()) { Ok(()) => Ok(()), Err(_) => Err(()), } } /// Determines whether the self type declared against /// `impl_def_id` matches `obligation_self_ty`. If successful, /// returns the substitutions used to make them match. See /// `match_impl()`. For example, if `impl_def_id` is declared /// as: /// /// impl<T:Copy> Foo for ~T { ... } /// /// and `obligation_self_ty` is `int`, we'd back an `Err(_)` /// result. But if `obligation_self_ty` were `~int`, we'd get /// back `Ok(T=int)`. fn match_inherent_impl(&mut self, impl_def_id: ast::DefId, obligation_cause: &ObligationCause, obligation_self_ty: Ty<'tcx>) -> Result<Substs<'tcx>,()> { // Create fresh type variables for each type parameter declared // on the impl etc. let impl_substs = util::fresh_type_vars_for_impl(self.infcx, obligation_cause.span, impl_def_id); // Find the self type for the impl. let impl_self_ty = ty::lookup_item_type(self.tcx(), impl_def_id).ty; let impl_self_ty = impl_self_ty.subst(self.tcx(), &impl_substs); debug!("match_impl_self_types(obligation_self_ty={}, impl_self_ty={})", obligation_self_ty.repr(self.tcx()), impl_self_ty.repr(self.tcx())); match self.match_self_types(obligation_cause, impl_self_ty, obligation_self_ty) { Ok(()) => { debug!("Matched impl_substs={}", impl_substs.repr(self.tcx())); Ok(impl_substs) } Err(()) => { debug!("NoMatch"); Err(()) } } } fn match_self_types(&mut self, cause: &ObligationCause, // The self type provided by the impl/caller-obligation: provided_self_ty: Ty<'tcx>, // The self type the obligation is for: required_self_ty: Ty<'tcx>) -> Result<(),()> { // FIXME(#5781) -- equating the types is stronger than // necessary. Should consider variance of trait w/r/t Self. 
let origin = infer::RelateSelfType(cause.span); match self.infcx.eq_types(false, origin, provided_self_ty, required_self_ty) { Ok(()) => Ok(()), Err(_) => Err(()), } } /////////////////////////////////////////////////////////////////////////// // Miscellany fn match_fresh_trait_refs(&self, previous: &ty::PolyTraitRef<'tcx>, current: &ty::PolyTraitRef<'tcx>) -> bool { let mut matcher = ty_match::Match::new(self.tcx()); matcher.relate(previous, current).is_ok() } fn push_stack<'o,'s:'o>(&mut self, previous_stack: TraitObligationStackList<'s, 'tcx>, obligation: &'o TraitObligation<'tcx>) -> TraitObligationStack<'o, 'tcx> { let fresh_trait_ref = obligation.predicate.to_poly_trait_ref().fold_with(&mut self.freshener); TraitObligationStack { obligation: obligation, fresh_trait_ref: fresh_trait_ref, previous: previous_stack, } } /// Returns set of all impls for a given trait. fn all_impls(&self, trait_def_id: ast::DefId) -> Vec<ast::DefId> { ty::populate_implementations_for_trait_if_necessary(self.tcx(), trait_def_id); match self.tcx().trait_impls.borrow().get(&trait_def_id) { None => Vec::new(), Some(impls) => impls.borrow().clone(), } } fn closure_trait_ref(&self, obligation: &TraitObligation<'tcx>, closure_def_id: ast::DefId, substs: &Substs<'tcx>) -> ty::PolyTraitRef<'tcx> { let closure_type = self.closure_typer.closure_type(closure_def_id, substs); let ty::Binder((trait_ref, _)) = util::closure_trait_ref_and_return_type(self.tcx(), obligation.predicate.def_id(), obligation.predicate.0.self_ty(), // (1) &closure_type.sig, util::TupleArgumentsFlag::No); // (1) Feels icky to skip the binder here, but OTOH we know // that the self-type is an unboxed closure type and hence is // in fact unparameterized (or at least does not reference any // regions bound in the obligation). Still probably some // refactoring could make this nicer. ty::Binder(trait_ref) } /// Returns the obligations that are implied by instantiating an /// impl or trait. The obligations are substituted and fully /// normalized. This is used when confirming an impl or default /// impl. fn impl_or_trait_obligations(&mut self, cause: ObligationCause<'tcx>, recursion_depth: usize, def_id: ast::DefId, // of impl or trait substs: &Substs<'tcx>, // for impl or trait skol_map: infer::SkolemizationMap, snapshot: &infer::CombinedSnapshot) -> VecPerParamSpace<PredicateObligation<'tcx>> { debug!("impl_or_trait_obligations(def_id={})", def_id.repr(self.tcx())); let predicates = ty::lookup_predicates(self.tcx(), def_id); let predicates = predicates.instantiate(self.tcx(), substs); let predicates = normalize_with_depth(self, cause.clone(), recursion_depth, &predicates); let predicates = self.infcx().plug_leaks(skol_map, snapshot, &predicates); let mut obligations = util::predicates_for_generics(self.tcx(), cause, recursion_depth, &predicates.value); obligations.extend(TypeSpace, predicates.obligations.into_iter()); obligations } #[allow(unused_comparisons)] fn derived_cause(&self, obligation: &TraitObligation<'tcx>, variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>) -> ObligationCause<'tcx> { /*! * Creates a cause for obligations that are derived from * `obligation` by a recursive search (e.g., for a builtin * bound, or eventually a `impl Foo for ..`). If `obligation` * is itself a derived obligation, this is just a clone, but * otherwise we create a "derived obligation" cause so as to * keep track of the original root obligation for error * reporting. 
*/ // NOTE(flaper87): As of now, it keeps track of the whole error // chain. Ideally, we should have a way to configure this either // by using -Z verbose or just a CLI argument. if obligation.recursion_depth >= 0 { let derived_cause = DerivedObligationCause { parent_trait_ref: obligation.predicate.to_poly_trait_ref(), parent_code: Rc::new(obligation.cause.code.clone()), }; ObligationCause::new(obligation.cause.span, obligation.cause.body_id, variant(derived_cause)) } else { obligation.cause.clone() } } /// Upcasts an object trait-reference into those that match the obligation. fn upcast(&mut self, obj_trait_ref: ty::PolyTraitRef<'tcx>, obligation: &TraitObligation<'tcx>) -> Vec<ty::PolyTraitRef<'tcx>> { debug!("upcast(obj_trait_ref={}, obligation={})", obj_trait_ref.repr(self.tcx()), obligation.repr(self.tcx())); let obligation_def_id = obligation.predicate.def_id(); let mut upcast_trait_refs = util::upcast(self.tcx(), obj_trait_ref, obligation_def_id); // Retain only those upcast versions that match the trait-ref // we are looking for. In particular, we know that all of // `upcast_trait_refs` apply to the correct trait, but // possibly with incorrect type parameters. For example, we // may be trying to upcast `Foo` to `Bar<i32>`, but `Foo` is // declared as `trait Foo : Bar<u32>`. upcast_trait_refs.retain(|upcast_trait_ref| { let upcast_trait_ref = upcast_trait_ref.clone(); self.infcx.probe(|_| self.match_poly_trait_ref(obligation, upcast_trait_ref)).is_ok() }); debug!("upcast: upcast_trait_refs={}", upcast_trait_refs.repr(self.tcx())); upcast_trait_refs } } impl<'tcx> Repr<'tcx> for SelectionCandidate<'tcx> { fn repr(&self, tcx: &ty::ctxt<'tcx>) -> String { match *self { PhantomFnCandidate => format!("PhantomFnCandidate"), ErrorCandidate => format!("ErrorCandidate"), BuiltinCandidate(b) => format!("BuiltinCandidate({:?})", b), BuiltinObjectCandidate => format!("BuiltinObjectCandidate"), ParamCandidate(ref a) => format!("ParamCandidate({})", a.repr(tcx)), ImplCandidate(a) => format!("ImplCandidate({})", a.repr(tcx)), DefaultImplCandidate(t) => format!("DefaultImplCandidate({:?})", t), DefaultImplObjectCandidate(t) => format!("DefaultImplObjectCandidate({:?})", t), ProjectionCandidate => format!("ProjectionCandidate"), FnPointerCandidate => format!("FnPointerCandidate"), ObjectCandidate => format!("ObjectCandidate"), ClosureCandidate(c, ref s) => { format!("ClosureCandidate({:?},{})", c, s.repr(tcx)) } } } } impl<'tcx> SelectionCache<'tcx> { pub fn new() -> SelectionCache<'tcx> { SelectionCache { hashmap: RefCell::new(FnvHashMap()) } } } impl<'o,'tcx> TraitObligationStack<'o,'tcx> { fn list(&'o self) -> TraitObligationStackList<'o,'tcx> { TraitObligationStackList::with(self) } fn iter(&'o self) -> TraitObligationStackList<'o,'tcx> { self.list() } } #[derive(Copy, Clone)] struct TraitObligationStackList<'o,'tcx:'o> { head: Option<&'o TraitObligationStack<'o,'tcx>> } impl<'o,'tcx> TraitObligationStackList<'o,'tcx> { fn empty() -> TraitObligationStackList<'o,'tcx> { TraitObligationStackList { head: None } } fn with(r: &'o TraitObligationStack<'o,'tcx>) -> TraitObligationStackList<'o,'tcx> { TraitObligationStackList { head: Some(r) } } } impl<'o,'tcx> Iterator for TraitObligationStackList<'o,'tcx>{ type Item = &'o TraitObligationStack<'o,'tcx>; fn next(&mut self) -> Option<&'o TraitObligationStack<'o,'tcx>> { match self.head { Some(o) => { *self = o.previous; Some(o) } None => None } } } impl<'o,'tcx> Repr<'tcx> for TraitObligationStack<'o,'tcx> { fn repr(&self, tcx: &ty::ctxt<'tcx>) -> 
String { format!("TraitObligationStack({})", self.obligation.repr(tcx)) } } impl<'tcx> EvaluationResult<'tcx> { fn may_apply(&self) -> bool { match *self { EvaluatedToOk | EvaluatedToAmbig | EvaluatedToErr(OutputTypeParameterMismatch(..)) => true, EvaluatedToErr(Unimplemented) => false, } } } impl MethodMatchResult { pub fn may_apply(&self) -> bool { match *self { MethodMatched(_) => true, MethodAmbiguous(_) => true, MethodDidNotMatch => false, } } }
assemble_candidates_from_impls
discussions-topic-post-item.js
import { PolymerElement } from '@polymer/polymer/polymer-element.js'; import '@polymer/paper-item/paper-item.js'; import '@polymer/paper-item/paper-item-body.js'; import '@polymer/paper-input/paper-input.js'; import '@polymer/paper-spinner/paper-spinner-lite.js'; import '@polymer/paper-button/paper-button.js'; import { PrefetchMixin } from '../prefetch-mixin.js'; import '../user/user-name.js'; import { SirenEntityMixin } from '../siren-entity-mixin.js'; import { SirenActionMixin } from '../siren-action-mixin.js'; import { NoteMixin } from '../notes/note-mixin.js'; import './discussions-topic-post-reply-list.js'; import '../shared-styles.js'; import { html } from '@polymer/polymer/lib/utils/html-tag.js'; /* @mixes NoteMixin */ /* @mixes SirenActionMixin */ /* @mixes PrefetchMixin */ /* @mixes SirenEntityMixin */ class
extends NoteMixin(SirenActionMixin(PrefetchMixin(SirenEntityMixin(PolymerElement)))) { static get template() { return html` <style include="shared-styles"> :host { display: block; font-size: 12px; } h5 { margin: 0; font-size: 14px } [hidden] { display: none !important; } .read { border-left: solid 3px lightgray; } .unread { border-left: solid 3px lightskyblue; } .light-borders { border-top: solid 1px lightgrey; border-bottom: solid 1px lightgrey; border-right: solid 1px lightgrey; } span { font-size: 12px; } .new-thread-container { margin-left: 10px; border-left: solid 3px lightgrey; margin-right: 10px; } .white { --paper-spinner-color: white; } </style> <div> <paper-item on-tap="toggleHideReplies" id="topicitem" class="light-borders"> <paper-item-body three-line="" class="basic-top-bottom-padding"> <div class="flex-parent"> <h5 class="flex-1">[[post.properties.subject]]</h5> <span class="flex-right"> <d2l-user-name href="[[_getAuthorHref(entity)]]" token="[[token]]"></d2l-user-name> </span> </div> <div class="flex-parent"> <span class="flex-1">[[post.properties.replies]] replies, [[post.properties.unread]] unread</span> <span class="flex-right">[[date]]</span> </div> <span style="font-size: 14px;" id="message"></span> </paper-item-body> </paper-item> <template is="dom-if" if="{{!hideReplies}}"> <d2l-discussions-topic-post-reply-list href="[[_getRepliesHref(entity)]]" token="[[token]]"></d2l-discussions-topic-post-reply-list> <div hidden="{{!canReply}}" class="flex-parent new-thread-container basic-left-padding"> <paper-input class="flex-1" style="width: 100%" label="Reply to this thread" value="{{reply}}"></paper-input> <paper-button style="background-color: cornflowerblue; color: white; height: 100%; margin-top: 20px;" on-tap="_sendReply"> <template is="dom-if" if="{{!sendingReply}}"> <span>Send</span> </template> <template is="dom-if" if="{{sendingReply}}"> <paper-spinner-lite active="" class="white" style="width: 18px; height: 18px;"></paper-spinner-lite> </template> </paper-button> </div> </template> </div> <slot></slot> `; } static get interesting() { return [ { getLinks: entity => entity.getActionByName('get-all-posts'), elements: [window.customElements.get('d2l-discussions-topic-post-reply-list')] }, { getLinks: entity => entity.getLinkByRel('author'), elements: [window.customElements.get('d2l-user-name')] } ]; } static get is() { return 'd2l-discussions-topic-post-item'; } static get properties() { return { post: { type: Object, value: {} }, token: String, href: String, date: { type: String, value: '' }, hideReplies: { type: Boolean, value: true }, reply: String, canReply: { type: String, value: false }, readClass: { type: String, value: 'read' }, sendingReply: { type: Boolean, value: false } }; } static get observers() { return [ '_changed(entity)' ]; } _changed(entity) { this.post = entity; var dateEntity = this.post.getSubEntityByClass('created'); if (dateEntity) { var uglyAssDate = dateEntity.properties.date; this.date = new Intl.DateTimeFormat(this.locale, { month: 'long', day: 'numeric', year: 'numeric', hour: 'numeric', minute: 'numeric', hour12: true }).format(new Date(uglyAssDate)); } this.canReply = this.post.hasActionByName('reply'); if (this.post.properties && this.post.properties.unread > 0) this.readClass = 'unread'; else this.readClass = 'read'; this.$.topicitem.classList.add(this.readClass); // THIS IS NOT SAFE. YOU WILL DIE. DO NOT SHIP THIS. this.$.message.innerHTML = this.get('entity.properties.message'); // I MEAN IT!! THIS IS NOT SAFE. YOU WILL DIE. 
DO NOT SHIP THIS. } _getAuthorHref(entity) { return entity.getLinkByRel('author') && entity.getLinkByRel('author').href; } _getRepliesHref(entity) { return entity.getActionByName('get-all-posts') && entity.getActionByName('get-all-posts').href; } toggleHideReplies() { this.hideReplies = !this.hideReplies; } _sendReply() { var self = this; var action = this.post.getActionByName('reply'); if (action) { this.sendingReply = true; var fields = this.getSirenFields(action); fields.has('message') && fields.set('message', self.reply); fields.has('isHtml') && fields.set('isHtml', true); this.performSirenAction(action, fields).then(function() { self.reply = ''; self.sendingReply = false; }); } } } window.customElements.define(DiscussionsTopicPostItem.is, DiscussionsTopicPostItem);
DiscussionsTopicPostItem
attributes.go
package customresourcevalidation import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/admission" authorizationv1 "github.com/openshift/api/authorization/v1" configv1 "github.com/openshift/api/config/v1" quotav1 "github.com/openshift/api/quota/v1" securityv1 "github.com/openshift/api/security/v1" ) // unstructuredUnpackingAttributes tries to convert to a real object in the config scheme type unstructuredUnpackingAttributes struct { admission.Attributes } func (a *unstructuredUnpackingAttributes) GetObject() runtime.Object { return toBestObjectPossible(a.Attributes.GetObject()) } func (a *unstructuredUnpackingAttributes) GetOldObject() runtime.Object { return toBestObjectPossible(a.Attributes.GetOldObject()) } // toBestObjectPossible tries to convert to a real object in the supported scheme func toBestObjectPossible(orig runtime.Object) runtime.Object
var supportedObjectsScheme = runtime.NewScheme() func init() { utilruntime.Must(configv1.Install(supportedObjectsScheme)) utilruntime.Must(quotav1.Install(supportedObjectsScheme)) utilruntime.Must(securityv1.Install(supportedObjectsScheme)) utilruntime.Must(authorizationv1.Install(supportedObjectsScheme)) }
{ unstructuredOrig, ok := orig.(runtime.Unstructured) if !ok { return orig } targetObj, err := supportedObjectsScheme.New(unstructuredOrig.GetObjectKind().GroupVersionKind()) if err != nil { utilruntime.HandleError(err) return unstructuredOrig } if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredOrig.UnstructuredContent(), targetObj); err != nil { utilruntime.HandleError(err) return unstructuredOrig } return targetObj }
autograph.py
import tensorflow as tf @tf.function def f():
f() f()
for i in range(10): print(i)
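The record above is the trace-time case: the Python-level range/print loop is unrolled once while tf.function traces f, so the numbers appear on the first call only and the second call is silent. A small companion sketch (not part of the original file, assuming standard TensorFlow 2.x) showing the graph-mode counterpart, where AutoGraph keeps both the loop and the printing inside the traced graph so they run on every call:

import tensorflow as tf

@tf.function
def g():
    # tf.range and tf.print stay in the traced graph, unlike Python range/print,
    # which only execute while the function is being traced.
    for i in tf.range(10):
        tf.print(i)

g()  # prints 0..9
g()  # prints 0..9 again, because the printing ops are part of the graph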
profile.go
package userscontroller //TODO: Consider refactoring import ( "context" "encoding/json" "fmt" "log" "net/http" "github.com/maxwellgithinji/farmsale_backend/config/mdb" "github.com/maxwellgithinji/farmsale_backend/models/usersmodel" "github.com/maxwellgithinji/farmsale_backend/utils" "github.com/gorilla/mux" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" "golang.org/x/crypto/bcrypt" ) // EditProfile godoc // @Summary A user is able to edit their account details // @Description Editing is only accessible to the owners of the credentials // @Tags user // @Accept json // @Produce json // @Param id path string true "Account ID" // @Param profile body usersmodel.SignupUser true "edit profile" // @Success 200 {object} usersmodel.SuccessMessage // @Router /profile/{id} [put] // @Security ApiKeyAuth func EditProfile(w http.ResponseWriter, req *http.Request)
{ if req.Method != "PUT" { http.Error(w, http.StatusText(405), http.StatusMethodNotAllowed) return } ctx := context.Background() user := &usersmodel.User{} var users []*usersmodel.User params := mux.Vars(req) //id from params strID := params["id"] //Convert the id to primitive.ObjectID id, err := primitive.ObjectIDFromHex(strID) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(err) log.Fatal(err) } //filter by the id filter := bson.D{{"_id", id}} err = json.NewDecoder(req.Body).Decode(user) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(err) log.Fatal(err) } // find the user filterCursor, err := mdb.Users.Find(ctx, bson.M{"_id": id}) if err != nil { err := ErrorResponse{ Err: "ID is invalid", } w.WriteHeader(http.StatusNotAcceptable) json.NewEncoder(w).Encode(err) log.Fatal(err) return } if err = filterCursor.All(ctx, &users); err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(err) log.Fatal(err) return } if len(users) == 0 { err := ErrorResponse{ Err: `User with id (` + strID + `) not found`, } w.WriteHeader(http.StatusForbidden) json.NewEncoder(w).Encode(err) return } bs, err := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.MinCost) if err != nil { err := ErrorResponse{ Err: "Password encryption failed", } w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(err) return } user.Password = string(bs) update := bson.D{{"$set", bson.D{ {"username", user.Username}, {"email", user.Email}, {"password", user.Password}, {"phonenumber", user.Phonenumber}, {"idnumber", user.Idnumber}, }}} updateUser, err := mdb.Users.UpdateOne(ctx, filter, update) if err != nil { fmt.Println(err) err := ErrorResponse{ Err: `Update Failed`, } w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(err) return } msg := utils.MessageResponse{ Msg: "Update successful", } if updateUser.MatchedCount != 0 { fmt.Println("matched and replaced " + fmt.Sprint(len(users)) + " existing document") //TODO: delete in prod json.NewEncoder(w).Encode(msg) return } if updateUser.UpsertedCount != 0 { fmt.Printf("inserted a new document with ID %v\n", updateUser.UpsertedID) //TODO: delete in prod } json.NewEncoder(w).Encode(msg) }
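The swagger annotations above describe a PUT /profile/{id} endpoint guarded by ApiKeyAuth. A rough client-side sketch, written in Python to keep one language across the examples in this document; the base URL, auth header name, and JSON field names are assumptions inferred from the bson keys used in the handler, not confirmed by the usersmodel.User struct tags:

import requests

BASE_URL = "http://localhost:8080"           # assumed host and port
account_id = "5f1f7b2e9c9d3a0001a1b2c3"      # a 24-character MongoDB ObjectID hex string
payload = {
    # Field names are guesses based on the bson keys in the update document.
    "username": "maxwell",
    "email": "max@example.com",
    "password": "new-secret",
    "phonenumber": "0712345678",
    "idnumber": "12345678",
}
resp = requests.put(
    f"{BASE_URL}/profile/{account_id}",
    json=payload,
    headers={"Authorization": "Bearer <token>"},  # ApiKeyAuth header name is an assumption
)
print(resp.status_code, resp.json())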
enums3.rs
// enums3.rs // Address all the TODOs to make the tests pass! // I AM NOT DONE enum Message { // TODO: implement the message variant types based on their usage below Move(Point), Echo(String), ChangeColor((i32,i32,i32)), Quit } struct Point { x: u8, y: u8, } struct State { color: (u8, u8, u8), position: Point, quit: bool, } impl State { fn change_color(&mut self, color: (u8, u8, u8)) { self.color = color; } fn quit(&mut self) { self.quit = true; } fn echo(&self, s: String) { println!("{}", s); } fn move_position(&mut self, p: Point) { self.position = p; } fn process(&mut self, message: Message) { if message == Message::ChangeColor
// TODO: create a match expression to process the different message variants } } #[cfg(test)] mod tests { use super::*; #[test] fn test_match_message_call() { let mut state = State { quit: false, position: Point { x: 0, y: 0 }, color: (0, 0, 0), }; state.process(Message::ChangeColor((255, 0, 255))); state.process(Message::Echo(String::from("hello world"))); state.process(Message::Move(Point { x: 10, y: 15 })); state.process(Message::Quit); assert_eq!(state.color, (255, 0, 255)); assert_eq!(state.position.x, 10); assert_eq!(state.position.y, 15); assert_eq!(state.quit, true); } }
{ self.change_color(message); }
theme.ts
import { Theme } from '@emotion/react'; export const theme: Theme = { colors: { background: '#202020', text: '#fff', gray: '#606070', textInverse: '#202020', backgroundInverse: '#fff', }, space: [0, 4, 8, 16, 32, 64, 128, 256], fonts: { body: 'Menlo, monospace', heading: '"Major Mono Display", monospace', }, radius: { control: 8, surface: 16, }, focusRing: { primary: `0 0 0 2px #202020, 0 0 0 4px #fff`, }, }; declare module '@emotion/react' { export interface Theme { colors: { background: string; text: string; gray: string; textInverse: string; backgroundInverse: string; }; space: number[]; fonts: { body: string; heading: string; }; radius: { control: number; surface: number; }; focusRing: { primary: string;
}; } }
qobject.rs
use libc; use std::mem::forget; use std::slice::from_raw_parts_mut; use qvariant::*; use types::*; use qmeta::*; #[doc(hidden)] /// Contains a pointer to raw Qt object. #[derive(Debug)] pub struct QObject { ptr: DosQObject, qmeta: DosQMetaObject, binded_ptr: *mut libc::c_void, } extern "C" {
fn dos_qobject_signal_connect(senderVPtr: DosQObject, signal: *const libc::c_char, receiverVPtr: DosQObject, method: *const libc::c_char, qtype: i32) -> bool; fn dos_qobject_delete(deleted: DosQObject); fn dos_qmetaobject_delete(vptr: DosQMetaObject); } impl Drop for QObject { fn drop(&mut self) { unsafe { dos_qobject_delete(self.ptr); dos_qmetaobject_delete(self.qmeta); } } } macro_rules! QT_connect { ($sender:ident, $signal:ident, $receiver:ident, $method:tt) => {{ unimplemented!() }} } /// This enum describes the types of connection that can be used between signals and slots. /// In particular, it determines whether a particular signal is delivered to a slot immediately or queued for delivery at a later time. pub enum QtConnectionType { /// **(Default)** If the receiver lives in the thread that emits the signal, Qt::DirectConnection is used. Otherwise, Qt::QueuedConnection is used. The connection type is determined when the signal is emitted. Auto = 0, /// The slot is invoked immediately when the signal is emitted. The slot is executed in the signalling thread. Direct = 1, /// The slot is invoked when control returns to the event loop of the receiver's thread. The slot is executed in the receiver's thread. Queued = 2, /// Same as Qt::QueuedConnection, except that the signalling thread blocks until the slot returns. This connection must not be used if the receiver lives in the signalling thread, or else the application will deadlock. BlockingQueued = 3, /// This is a flag that can be combined with any one of the above connection types, using a bitwise OR. When Qt::UniqueConnection is set, QObject::connect() will fail if the connection already exists (i.e. if the same signal is already connected to the same slot for the same pair of objects). This flag was introduced in Qt 4.6. Unique = 0x80, } /// Called when a slot should be executed /// @param self The pointer to the `QObject` in the binded language /// @param slotName The slotName as `DosQVariant`. It should not be deleted /// @param argc The number of arguments /// @param argv An array of `DosQVariant` pointers. They should not be deleted type DObjectCallback = extern "C" fn(*mut libc::c_void, DosQVariant, i32, *mut DosQVariant); impl QObject { pub fn new(obj: &mut QObjectMacro) -> QObject { unsafe { let qmeta = QMetaDefinition::new(obj.qmeta()); let meta = QMeta::new_for_qobject(qmeta); // println!("Adress of wrapper {:p}", obj); let obj = Box::new(obj); let binded_ptr = Box::into_raw(obj) as *mut libc::c_void; QObject { ptr: dos_qobject_create(binded_ptr, get_dos_qmeta(&meta), callback), qmeta: get_dos_qmeta(&meta), binded_ptr: binded_ptr, } } } } pub fn get_qobj_ptr(o: &QObject) -> DosQObject { o.ptr } pub fn get_binded_ptr(o: &QObject) -> *mut libc::c_void { o.binded_ptr } pub fn set_qobj_ptr(o: &mut QObject, ptr: DosQObject) { o.ptr = ptr; } extern "C" fn callback(obj: *mut libc::c_void, slotName: DosQVariant, argc: i32, argv: *mut DosQVariant) { unsafe { let mut obj: Box<&mut QObjectMacro> = Box::from_raw(obj as *mut &mut QObjectMacro); // println!("Calling adress of wrapper {:p}", *obj.as_mut()); let slice = from_raw_parts_mut(argv, argc as usize); let vec: Vec<QVariant> = slice.iter().skip(1).map(|&dq| dq.into()).collect(); let slotName: String = new_qvariant(slotName).into(); // println!("Right before going in... name: {}, argc: {}", // slotName, // argc); if let Some(qvar) = obj.qslot_call(&slotName, vec) { let mut qv: QVariant = slice[0].into(); qv.set(qvar); } forget(obj); } }
fn dos_qobject_create(dObjectPointer: *mut libc::c_void, metaObject: DosQMetaObject, dObjectCallback: DObjectCallback) -> DosQObject;
titi.go
import "guthib.com/bar" func Quux() string { return quux.Quux() }
package titi
utils.py
import syft as sy import torch from typing import Dict from typing import Any import logging logger = logging.getLogger(__name__) def extract_batches_per_worker(federated_train_loader: sy.FederatedDataLoader): """Extracts the batches from the federated_train_loader and stores them in a dictionary (keys = data.location). Args: federated_train_loader: the connection object we use to send responses. back to the client. """ logging_interval = 100 batches = {} for worker_id in federated_train_loader.workers: worker = federated_train_loader.federated_dataset.datasets[worker_id].location batches[worker] = [] for batch_idx, (data, target) in enumerate(federated_train_loader): if batch_idx % logging_interval == 0: logger.debug("Extracted %s batches from federated_train_loader", batch_idx) batches[data.location].append((data, target)) return batches def
(dst_model, src_model): """Add the parameters of two models. Args: dst_model (torch.nn.Module): the model to which the src_model will be added. src_model (torch.nn.Module): the model to be added to dst_model. Returns: torch.nn.Module: the resulting model of the addition. """ params1 = src_model.named_parameters() params2 = dst_model.named_parameters() dict_params2 = dict(params2) with torch.no_grad(): for name1, param1 in params1: if name1 in dict_params2: dict_params2[name1].set_(param1.data + dict_params2[name1].data) return dst_model def scale_model(model, scale): """Scale the parameters of a model. Args: model (torch.nn.Module): the models whose parameters will be scaled. scale (float): the scaling factor. Returns: torch.nn.Module: the module with scaled parameters. """ params = model.named_parameters() dict_params = dict(params) with torch.no_grad(): for name, param in dict_params.items(): dict_params[name].set_(dict_params[name].data * scale) return model def federated_avg(models: Dict[Any, torch.nn.Module]) -> torch.nn.Module: """Calculate the federated average of a dictionary containing models. The models are extracted from the dictionary via the models.values() command. Args: models (Dict[Any, torch.nn.Module]): a dictionary of models for which the federated average is calculated. Returns: torch.nn.Module: the module with averaged parameters. """ nr_models = len(models) model_list = list(models.values()) model = model_list[0] for i in range(1, nr_models): model = add_model(model, model_list[i]) model = scale_model(model, 1.0 / nr_models) return model def accuracy(pred_softmax, target): """Calculate the accuray of a given prediction. This functions assumes pred_softmax to be converted into the final prediction by taking the argmax. Args: pred_softmax: array type(float), providing nr_classes values per element in target. target: array type(int), correct classes, taking values in range [0, nr_classes). Returns: accuracy: float, fraction of correct predictions. 
""" nr_elems = len(target) pred = pred_softmax.argmax(dim=1) return (pred.float() == target.view(pred.shape).float()).sum().numpy() / float(nr_elems) def create_gaussian_mixture_toy_data(nr_samples: int): # pragma: no cover """ Create a simple toy data for binary classification The data is drawn from two normal distributions target = 1: mu = 2, sigma = 1 target = 0: mu = 0, sigma = 1 The dataset is balanced with an equal number of positive and negative samples Args: nr_samples: number of samples to generate Returns: data, targets """ sample_dim = 2 one_half = int(nr_samples / 2) X1 = torch.randn(one_half, sample_dim, requires_grad=True) - 5 X2 = torch.randn(one_half, sample_dim, requires_grad=True) + 5 X = torch.cat([X1, X2], dim=0) Y1 = torch.zeros(one_half, requires_grad=False).long() Y2 = torch.ones(one_half, requires_grad=False).long() Y = torch.cat([Y1, Y2], dim=0) return X, Y def iris_data_partial(): """ Returns: 30 samples from the iris data set: https://archive.ics.uci.edu/ml/datasets/iris """ data = [ [5.1, 3.5, 1.4, 0.2], [4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2], [4.6, 3.1, 1.5, 0.2], [5.0, 3.6, 1.4, 0.2], [5.4, 3.9, 1.7, 0.4], [4.6, 3.4, 1.4, 0.3], [5.0, 3.4, 1.5, 0.2], [4.4, 2.9, 1.4, 0.2], [4.9, 3.1, 1.5, 0.1], ] target_to_string = {0: "Iris-setosa", 1: "Iris-versicolor", 2: "Iris-virginica"} targets = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] data += [ [7.0, 3.2, 4.7, 1.4], [6.4, 3.2, 4.5, 1.5], [6.9, 3.1, 4.9, 1.5], [5.5, 2.3, 4.0, 1.3], [6.5, 2.8, 4.6, 1.5], [5.7, 2.8, 4.5, 1.3], [6.3, 3.3, 4.7, 1.6], [4.9, 2.4, 3.3, 1.0], [6.6, 2.9, 4.6, 1.3], [5.2, 2.7, 3.9, 1.4], ] targets += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] data += [ [6.3, 3.3, 6.0, 2.5], [5.8, 2.7, 5.1, 1.9], [7.1, 3.0, 5.9, 2.1], [6.3, 2.9, 5.6, 1.8], [6.5, 3.0, 5.8, 2.2], [7.6, 3.0, 6.6, 2.1], [4.9, 2.5, 4.5, 1.7], [7.3, 2.9, 6.3, 1.8], [6.7, 2.5, 5.8, 1.8], [7.2, 3.6, 6.1, 2.5], ] targets += [2, 2, 2, 2, 2, 2, 2, 2, 2, 2] return torch.tensor(data), torch.tensor(targets)
add_model
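A minimal usage sketch for the federated-averaging helpers above, assuming the module is importable as written; the worker keys are placeholders, since federated_avg only iterates over models.values():

import torch
# from utils import federated_avg   # assumed import path

models = {
    "alice": torch.nn.Linear(2, 1),
    "bob": torch.nn.Linear(2, 1),
}
averaged = federated_avg(models)
# Each parameter of `averaged` is the element-wise mean of the two models.
# Note that add_model and scale_model mutate their first argument in place,
# so the model stored under the first key is overwritten as a side effect.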
00777256333f45c23ba5064a397ec19f.js
load("188875624e386d01d46f59a2dbf5d1c9.js"); /** File Name: expression-007.js Corresponds To: 11.2.2-2-n.js ECMA Section: 11.2.2. The new operator Description: Author: [email protected] Date: 12 november 1997 */ var SECTION = "expression-007"; var VERSION = "JS1_4"; var TITLE = "The new operator"; startTest(); writeHeaderToLog( SECTION + " "+ TITLE); var tc = 0; var testcases = new Array(); var result = "Failed"; var exception = "No exception thrown"; var expect = "Passed"; try { UNDEFINED = void 0; result = new UNDEFINED();
testcases[tc++] = new TestCase( SECTION, "UNDEFINED = void 0; result = new UNDEFINED()" + " (threw " + exception +")", expect, result ); test();
} catch ( e ) { result = expect; exception = e.toString(); }
spi_checks_lsbmode.py
#!/usr/bin/env python # Author: Alex Tereschenko <[email protected]> # Copyright (c) 2016 Alex Tereschenko. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import mraa as m import unittest as u from spi_checks_shared import * class SpiChecksLsbmode(u.TestCase): def setUp(self): self.spi = m.Spi(MRAA_SPI_BUS_NUM) def tearDown(self): del self.spi def test_spi_set_lsbmode_false(self): TEST_LSBMODE = False self.assertEqual(self.spi.lsbmode(TEST_LSBMODE), m.SUCCESS, "Setting LSB mode to %s did not return success" %TEST_LSBMODE) def test_spi_set_lsbmode_true(self): TEST_LSBMODE = True self.assertEqual(self.spi.lsbmode(TEST_LSBMODE), m.SUCCESS, "Setting LSB mode to %s did not return success" %TEST_LSBMODE) def test_spi_set_lsbmode_invalid(self):
if __name__ == "__main__": u.main()
TEST_LSBMODE = 10 self.assertRaises(TypeError, self.spi.lsbmode, TEST_LSBMODE)
board.py
from typing import Tuple from .cogs import Cog, EmptyCog, Player from .special_cogs import BoostedCog import numpy as np class Board: def __init__(self, height: int = 8, width: int = 12, locked: bool = True) -> None: self._visualization_board = '' self.board = np.array([[EmptyCog() for w in range(width)] for h in range(height)]) if locked: self.mask = np.zeros_like(self.board) else: self.mask = np.ones_like(self.board) self.storage = [] self.total_build = 0 self.total_flaggy = 0 self.total_exp = 0 def unlock(self, mask: np.array): assert mask.shape == self.board.shape, "Mask shape is different than board shape!" self.mask = mask def empty(self) -> bool: for cog in self.board.flatten(): if not isinstance(cog, EmptyCog): return False return True def place(self, x:int, y:int, cog: Cog = EmptyCog()) -> None: if self.validate(x, y): assert isinstance(cog, Cog), "You can't place non-cogs on board!" if not isinstance(self.board[y, x], EmptyCog): self.storage.append(self.board[y, x]) self.board[y,x] = cog def clear(self): self.reset_board_values() for x in range(self.board.shape[1]): for y in range(self.board.shape[0]): self.place(x, y, EmptyCog()) def reset_board_values(self): self.total_build = 0 self.total_flaggy = 0 self.total_exp = 0 def validate(self, x, y) -> bool: return (x >= 0 and y >= 0 and x < self.board.shape[1] and y < self.board.shape[0]) and (self.mask[y, x]) def get_totals(self) -> Tuple[int, int, int]: return self.total_build, self.total_flaggy, self.total_exp def calculate_board(self): self.reset_loop() self.multiply_loop() self.sum_loop() def reset_loop(self): self.reset_board_values() for c in self.board.flatten(): c.reset() def multiply_loop(self): for x in range(self.board.shape[1]): for y in range(self.board.shape[0]): if self.validate(x, y): c = self.board[y, x] if isinstance(c, BoostedCog): boosted_coordinates, boosted_values = c.boosted() for bc in boosted_coordinates: dx, dy = bc[0], bc[1] if self.validate(x+dx, y+dy): boosted_cog = self.board[y+dy, x+dx] boosted_cog.apply_boost(*boosted_values) self.board[y+dy, x+dx] = boosted_cog def sum_loop(self): for x in range(self.board.shape[1]): for y in range(self.board.shape[0]): if self.validate(x, y): c = self.board[y, x] self.total_build +=c.get_values()[0] self.total_flaggy += c.get_values()[1] self.total_exp += c.get_values()[2] def show(self): self.print_rates() self.print_board() self.print_storage() self.print_players_info() def print_rates(self): print("Total build rate: " + str(self.total_build) + '\n' + "Total flaggy rate: " + str(self.total_flaggy) + '\n' + "Total extra exp: " + str(self.total_exp)) def print_board(self): board_print = '' for y in range(self.board.shape[0]): for x in range(self.board.shape[1]): board_print += str(self.board[y, x]) + '\t' board_print = board_print[:-1] + '\n' self._visualization_board = board_print print(self._visualization_board) def print_storage(self): storage_print = 'In storage: ' for s in self.storage: storage_print += str(s) + ', ' print(storage_print) def print_players_info(self):
print('Player stats:') for c in self.board.flatten(): if isinstance(c, Player): print(c.info())
cli.rs
use crate::config::{get as get_config, CliConfig}; use clap::{App, Arg, ArgMatches}; use serde::Serialize; use serde_json::Value; use std::collections::HashMap; #[macro_use] mod macros; /// The resolution of a arg match. #[derive(Default, Debug, Serialize)] pub struct ArgData { /// The value of the arg. /// - Value::Bool if it's a flag, /// - Value::Array if it's multiple, /// - Value::String if it has value, /// - Value::Null otherwise. value: Value, /// The number of occurrences of the arg. /// e.g. `./app --arg 1 --arg 2 --arg 2 3 4` results in three occurrences. occurrences: u64, } /// The matched subcommand. #[derive(Default, Debug, Serialize)] pub struct SubcommandMatches { /// The subcommand name. name: String, /// The subcommand arg matches. matches: Matches, } /// The arg matches of a command. #[derive(Default, Debug, Serialize)] pub struct Matches { /// Data structure mapping each found arg with its resolution. args: HashMap<String, ArgData>, /// The matched subcommand if found. subcommand: Option<Box<SubcommandMatches>>, } impl Matches { /// Set a arg match. pub(crate) fn set_arg(&mut self, name: String, value: ArgData) { self.args.insert(name, value); } /// Sets the subcommand matches. pub(crate) fn set_subcommand(&mut self, name: String, matches: Matches) { self.subcommand = Some(Box::new(SubcommandMatches { name, matches })); } } /// Gets the arg matches of the CLI definition. pub fn get_matches() -> crate::Result<Matches> { let config = get_config()?; let cli = config .tauri .cli .as_ref() .ok_or(anyhow::anyhow!("CLI configuration not defined"))?; let about = cli .description() .unwrap_or(&crate_description!().to_string()) .to_string(); let app = get_app(crate_name!(), Some(&about), cli); let matches = app.get_matches(); Ok(get_matches_internal(cli, &matches)) } fn get_matches_internal(config: &CliConfig, matches: &ArgMatches) -> Matches { let mut cli_matches = Matches::default(); map_matches(config, matches, &mut cli_matches); let (subcommand_name, subcommand_matches_option) = matches.subcommand(); if let Some(subcommand_matches) = subcommand_matches_option { let mut subcommand_cli_matches = Matches::default(); map_matches( config.subcommands().unwrap().get(subcommand_name).unwrap(), subcommand_matches, &mut subcommand_cli_matches, ); cli_matches.set_subcommand(subcommand_name.to_string(), subcommand_cli_matches); } cli_matches } fn map_matches(config: &CliConfig, matches: &ArgMatches, cli_matches: &mut Matches) { if let Some(args) = config.args() { for arg in args { let occurrences = matches.occurrences_of(arg.name.clone()); let value = if occurrences == 0 || !arg.takes_value.unwrap_or(false) { Value::Bool(occurrences > 0) } else if arg.multiple.unwrap_or(false) { matches .values_of(arg.name.clone()) .map(|v| { let mut values = Vec::new(); for value in v { values.push(Value::String(value.to_string())); } Value::Array(values) }) .unwrap_or(Value::Null) } else { matches .value_of(arg.name.clone()) .map(|v| Value::String(v.to_string())) .unwrap_or(Value::Null) }; cli_matches.set_arg(arg.name.clone(), ArgData { value, occurrences }); } } } fn
<'a>(name: &str, about: Option<&'a String>, config: &'a CliConfig) -> App<'a> { let mut app = App::new(name) .author(crate_authors!()) .version(crate_version!()); if let Some(about) = about { app = app.about(&**about); } if let Some(long_description) = config.long_description() { app = app.long_about(&**long_description); } if let Some(before_help) = config.before_help() { app = app.before_help(&**before_help); } if let Some(after_help) = config.after_help() { app = app.after_help(&**after_help); } if let Some(args) = config.args() { for arg in args { let arg_name = arg.name.as_ref(); let mut clap_arg = Arg::new(arg_name).long(arg_name); if let Some(short) = arg.short { clap_arg = clap_arg.short(short); } clap_arg = bind_string_arg!(arg, clap_arg, description, about); clap_arg = bind_string_arg!(arg, clap_arg, long_description, long_about); clap_arg = bind_value_arg!(arg, clap_arg, takes_value); clap_arg = bind_value_arg!(arg, clap_arg, multiple); clap_arg = bind_value_arg!(arg, clap_arg, multiple_occurrences); clap_arg = bind_value_arg!(arg, clap_arg, number_of_values); clap_arg = bind_string_slice_arg!(arg, clap_arg, possible_values); clap_arg = bind_value_arg!(arg, clap_arg, min_values); clap_arg = bind_value_arg!(arg, clap_arg, max_values); clap_arg = bind_string_arg!(arg, clap_arg, required_unless, required_unless); clap_arg = bind_value_arg!(arg, clap_arg, required); clap_arg = bind_string_arg!(arg, clap_arg, required_unless, required_unless); clap_arg = bind_string_slice_arg!(arg, clap_arg, required_unless_all); clap_arg = bind_string_slice_arg!(arg, clap_arg, required_unless_one); clap_arg = bind_string_arg!(arg, clap_arg, conflicts_with, conflicts_with); clap_arg = bind_string_slice_arg!(arg, clap_arg, conflicts_with_all); clap_arg = bind_string_arg!(arg, clap_arg, requires, requires); clap_arg = bind_string_slice_arg!(arg, clap_arg, requires_all); clap_arg = bind_if_arg!(arg, clap_arg, requires_if); clap_arg = bind_if_arg!(arg, clap_arg, required_if); clap_arg = bind_value_arg!(arg, clap_arg, require_equals); clap_arg = bind_value_arg!(arg, clap_arg, index); app = app.arg(clap_arg); } } if let Some(subcommands) = config.subcommands() { for (subcommand_name, subcommand) in subcommands { let clap_subcommand = get_app(&subcommand_name, subcommand.description(), subcommand); app = app.subcommand(clap_subcommand); } } app }
get_app
podtemplate.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file was automatically generated by informer-gen package v1 import ( core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" time "time" ) // PodTemplateInformer provides access to a shared informer and lister for // PodTemplates. type PodTemplateInformer interface { Informer() cache.SharedIndexInformer Lister() v1.PodTemplateLister } type podTemplateInformer struct { factory internalinterfaces.SharedInformerFactory } // NewPodTemplateInformer constructs a new informer for PodTemplate type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func
(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { return client.CoreV1().PodTemplates(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { return client.CoreV1().PodTemplates(namespace).Watch(options) }, }, &core_v1.PodTemplate{}, resyncPeriod, indexers, ) } func defaultPodTemplateInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { return NewPodTemplateInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) } func (f *podTemplateInformer) Informer() cache.SharedIndexInformer { return f.factory.InformerFor(&core_v1.PodTemplate{}, defaultPodTemplateInformer) } func (f *podTemplateInformer) Lister() v1.PodTemplateLister { return v1.NewPodTemplateLister(f.Informer().GetIndexer()) }
NewPodTemplateInformer
body-row.component.ts
import { Component, Input, HostBinding, ElementRef, Output, KeyValueDiffers, KeyValueDiffer, EventEmitter, HostListener, ChangeDetectionStrategy, ChangeDetectorRef, DoCheck, SkipSelf } from '@angular/core'; import { TreeStatus } from './body-cell.component'; import { columnsByPin, columnGroupWidths, columnsByPinArr } from '../../utils/column'; import { Keys } from '../../utils/keys'; import { ScrollbarHelper } from '../../services/scrollbar-helper.service'; import { translateXY } from '../../utils/translate'; @Component({ selector: 'datatable-body-row', changeDetection: ChangeDetectionStrategy.OnPush, template: ` <div [draggable]="isDrag" (dragstart)="onDragStart($event)" *ngFor="let colGroup of _columnsByPin; let i = index; trackBy: trackByGroups" class="datatable-row-{{ colGroup.type }} datatable-row-group" [ngStyle]="_groupStyles[colGroup.type]" > <datatable-body-cell *ngFor="let column of colGroup.columns; let ii = index; trackBy: columnTrackingFn" tabindex="-1" [row]="row" [group]="group" [expanded]="expanded" [isSelected]="isSelected" [rowIndex]="rowIndex" [column]="column" [rowHeight]="rowHeight" [displayCheck]="displayCheck" [treeStatus]="treeStatus" (activate)="onActivate($event, ii)" (treeAction)="onTreeAction()" > </datatable-body-cell> </div> ` }) export class
implements DoCheck { @Input() set columns(val: any[]) { this._columns = val; this.recalculateColumns(val); this.buildStylesByGroup(); } get columns(): any[] { return this._columns; } @Input() set innerWidth(val: number) { if (this._columns) { const colByPin = columnsByPin(this._columns); this._columnGroupWidths = columnGroupWidths(colByPin, this._columns); } this._innerWidth = val; this.recalculateColumns(); this.buildStylesByGroup(); } get innerWidth(): number { return this._innerWidth; } @Input() expanded: boolean; @Input() rowClass: any; @Input() row: any; @Input() group: any; @Input() isSelected: boolean; @Input() rowIndex: number; @Input() displayCheck: any; @Input() treeStatus: TreeStatus = 'collapsed'; // add isDrag flag. @Input() isDrag: boolean; @Input() set offsetX(val: number) { this._offsetX = val; this.buildStylesByGroup(); } get offsetX() { return this._offsetX; } @HostBinding('class') get cssClass() { let cls = 'datatable-body-row'; if (this.isSelected) { cls += ' active'; } if (this.rowIndex % 2 !== 0) { cls += ' datatable-row-odd'; } if (this.rowIndex % 2 === 0) { cls += ' datatable-row-even'; } if (this.rowClass) { const res = this.rowClass(this.row); if (typeof res === 'string') { cls += ` ${res}`; } else if (typeof res === 'object') { const keys = Object.keys(res); for (const k of keys) { if (res[k] === true) { cls += ` ${k}`; } } } } return cls; } @HostBinding('style.height.px') @Input() rowHeight: number; @HostBinding('style.width.px') get columnsTotalWidths(): string { return this._columnGroupWidths.total; } @Output() activate: EventEmitter<any> = new EventEmitter(); @Output() treeAction: EventEmitter<any> = new EventEmitter(); _element: any; _columnGroupWidths: any; _columnsByPin: any; _offsetX: number; _columns: any[]; _innerWidth: number; _groupStyles: { [prop: string]: {} } = { left: {}, center: {}, right: {} }; private _rowDiffer: KeyValueDiffer<{}, {}>; constructor( private differs: KeyValueDiffers, @SkipSelf() private scrollbarHelper: ScrollbarHelper, private cd: ChangeDetectorRef, element: ElementRef ) { this._element = element.nativeElement; this._rowDiffer = differs.find({}).create(); } ngDoCheck(): void { if (this._rowDiffer.diff(this.row)) { this.cd.markForCheck(); } } trackByGroups(index: number, colGroup: any): any { return colGroup.type; } columnTrackingFn(index: number, column: any): any { return column.$$id; } buildStylesByGroup() { this._groupStyles.left = this.calcStylesByGroup('left'); this._groupStyles.center = this.calcStylesByGroup('center'); this._groupStyles.right = this.calcStylesByGroup('right'); this.cd.markForCheck(); } calcStylesByGroup(group: string) { const widths = this._columnGroupWidths; const offsetX = this.offsetX; const styles = { width: `${widths[group]}px` }; if (group === 'left') { translateXY(styles, offsetX, 0); } else if (group === 'right') { const bodyWidth = parseInt(this.innerWidth + '', 0); const totalDiff = widths.total - bodyWidth; const offsetDiff = totalDiff - offsetX; const offset = (offsetDiff + this.scrollbarHelper.width) * -1; translateXY(styles, offset, 0); } return styles; } onActivate(event: any, index: number): void { event.cellIndex = index; event.rowElement = this._element; this.activate.emit(event); } @HostListener('keydown', ['$event']) onKeyDown(event: KeyboardEvent): void { const keyCode = event.keyCode; const isTargetRow = event.target === this._element; const isAction = keyCode === Keys.return || keyCode === Keys.down || keyCode === Keys.up || keyCode === Keys.left || keyCode === Keys.right; if 
(isAction && isTargetRow) { event.preventDefault(); event.stopPropagation(); this.activate.emit({ type: 'keydown', event, row: this.row, rowElement: this._element }); } } @HostListener('mouseenter', ['$event']) onMouseenter(event: any): void { this.activate.emit({ type: 'mouseenter', event, row: this.row, rowElement: this._element }); } recalculateColumns(val: any[] = this.columns): void { this._columns = val; const colsByPin = columnsByPin(this._columns); this._columnsByPin = columnsByPinArr(this._columns); this._columnGroupWidths = columnGroupWidths(colsByPin, this._columns); } onTreeAction() { this.treeAction.emit(); } /** Drag and drop */ onDragStart(event: any): void { if (this.isDrag === true && this.row != null) { const rowString = JSON.stringify(this.row); event.dataTransfer.setData('text', rowString); } } }
DataTableBodyRowComponent
forumUnVote.go
// Copyright © 2018 EOS Canada <[email protected]> package cmd import ( "github.com/eoscanada/eos-go/forum" "github.com/spf13/cobra" "github.com/spf13/viper" ) var forumUnVoteCmd = &cobra.Command{ Use: "unvote [voter] [proposal_name]", Short: "Cancels a vote for a given proposal.", Args: cobra.ExactArgs(2), Run: func(cmd *cobra.Command, args []string) { targetAccount := toAccount(viper.GetString("forum-cmd-target-contract"), "--target-contract")
proposalName := toName(args[1], "proposal_name") action := forum.NewUnVote(voter, proposalName) action.Account = targetAccount api := getAPI() pushEOSCActions(api, action) }, } func init() { forumCmd.AddCommand(forumUnVoteCmd) }
voter := toAccount(args[0], "voter")
jeffreys.py
""" Jeffreys divergences """ import torch def jeffreys_normal(mu1, lv1, mu2, lv2): mu1, lv1 = mu1.view(mu1.shape[0], -1), lv1.view(lv1.shape[0], -1) mu2, lv2 = mu2.view(mu2.shape[0], -1), lv2.view(lv2.shape[0], -1) return (0.25*((-lv1).exp() + (-lv2).exp())*(mu1-mu2)**2 + 0.25*((lv1-lv2).exp() + (lv2-lv1).exp()) - 0.5).sum(dim=1) def jeffreys_bernoulli(p, q, eps=1e-5):
p = p.view(p.shape[0], -1)*(1.-2.*eps)+eps q = q.view(q.shape[0], -1)*(1.-2.*eps)+eps return 0.5*(p-q)*(p.log() - q.log() - (1-p).log() + (1-q).log())
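For diagonal Gaussians parameterized by mean and log-variance, jeffreys_normal works out to 0.5*(KL(p||q) + KL(q||p)) summed over the non-batch dimensions, and jeffreys_bernoulli is the element-wise symmetrized KL of eps-clamped Bernoulli parameters. A small sanity check, assuming the module is importable; identical distributions should give zero:

import torch
# from jeffreys import jeffreys_normal, jeffreys_bernoulli   # assumed import path

mu = torch.zeros(2, 3)
logvar = torch.zeros(2, 3)                        # log-variance 0 -> unit variance
print(jeffreys_normal(mu, logvar, mu, logvar))    # tensor([0., 0.])

p = torch.full((2, 3), 0.5)
print(jeffreys_bernoulli(p, p))                   # all zeros (element-wise, not summed)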
instance.go
// Package vpp handles the VPP instance. package vpp import ( "encoding/gob" "encoding/json" "fmt" "runtime/debug" "strings" govppapi "git.fd.io/govpp.git/api" "github.com/sirupsen/logrus" "go.ligato.io/vpp-agent/v3/plugins/vpp/binapi" "go.ligato.io/vpp-probe/pkg/exec" "go.ligato.io/vpp-probe/probe"
vppcli "go.ligato.io/vpp-probe/vpp/cli" ) // Instance handles access to a running VPP instance. type Instance struct { handler probe.Handler cli probe.CliExecutor api govppapi.Channel stats govppapi.StatsProvider agent *agent.Instance status *APIStatus vppInfo api.VppInfo } // NewInstance tries to initialize probe and returns a new Instance on success. func NewInstance(probe probe.Handler) (*Instance, error) { h := &Instance{ handler: probe, status: &APIStatus{}, } return h, h.Init() } type instanceData struct { ID string Metadata map[string]string VppInfo api.VppInfo Status *APIStatus Agent *agent.Instance } func (v *Instance) MarshalJSON() ([]byte, error) { instance := instanceData{ ID: v.handler.ID(), Metadata: v.handler.Metadata(), VppInfo: v.vppInfo, Agent: v.agent, Status: v.status, } return json.Marshal(instance) } func (v *Instance) UnmarshalJSON(data []byte) error { var instance instanceData if err := json.Unmarshal(data, &instance); err != nil { return err } v.handler = &dummyHandler{ id: instance.ID, metadata: instance.Metadata, } v.vppInfo = instance.VppInfo v.agent = instance.Agent v.status = instance.Status return nil } func (v Instance) String() string { return v.handler.Metadata()["name"] } func (v *Instance) ID() string { return fmt.Sprintf("instance::%s", v.handler.ID()) } func (v *Instance) Status() *APIStatus { return v.status } func (v *Instance) Handler() probe.Handler { return v.handler } func (v *Instance) Agent() *agent.Instance { return v.agent } func (v *Instance) VppInfo() api.VppInfo { return v.vppInfo } func (v *Instance) Init() (err error) { logrus.Tracef("init vpp instance: %v", v.ID()) if err = v.initVPP(); err != nil { v.status.LastErr = err return err } if err = v.initAgent(); err != nil { logrus.Debugf("vpp agent not detected") } var vppInfo api.VppInfo buildInfo, err := v.GetBuildInfo() if err != nil { v.status.LastErr = err return err } vppInfo.Build = *buildInfo if sysInfo, err := v.GetSystemInfo(); err != nil { logrus.Debugf("getting system info failed: %v", err) } else { vppInfo.System = *sysInfo } v.vppInfo = vppInfo return nil } func (v *Instance) RunCli(cmd string) (string, error) { if v.cli == nil { return "", ErrCLIUnavailable } return v.cli.RunCli(cmd) } func (v *Instance) initAgent() error { a, err := agent.NewInstance(v.handler) if err != nil { return err } v.agent = a return nil } func (v *Instance) initVPP() (err error) { if err = v.initCLI(); err != nil { v.status.CLI.SetError(err) return err } else { v.status.CLI.State = StateOK } if err := v.initBinapi(); err != nil { v.status.BinAPI.SetError(err) logrus.Debugf("Binary API init error: %v", err) } else { v.status.BinAPI.State = StateOK } if err := v.initStats(); err != nil { v.status.StatsAPI.SetError(err) logrus.Debugf("Stats API init error: %v", err) } else { v.status.StatsAPI.State = StateOK } return nil } const ( defaultCliSocket = "/run/vpp/cli.sock" defaultCliAddr = "localhost:5002" ) func (v *Instance) initCLI() error { var args []string if _, err := v.handler.Command("ls", defaultCliSocket).Output(); err != nil { args = append(args, "-s", defaultCliAddr) logrus.Tracef("checking cli socket error: %v, using flag '%s' for vppctl", err, args) } wrapper := exec.Wrap(v.handler, "/usr/bin/vppctl", args...) 
cli := vppcli.ExecutorFunc(func(cmd string) (string, error) { c := `"` + cmd + `"` out, err := wrapper.Command(c).Output() if err != nil { return "", err } return string(out), nil }) /*cli, err := v.handler.GetCLI() if err != nil { return fmt.Errorf("CLI handler: %w", err) }*/ out, err := cli.RunCli("show version verbose") if err != nil { return fmt.Errorf("CLI version check: %w", err) } logrus.Tracef("VPP version:\n%v", out) v.cli = cli return nil } func (v *Instance) initBinapi() (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("recovered panic: %v", e) logrus.WithError(err).Errorf("recovered panic:\n%s\n", string(debug.Stack())) } }() vppClient := newVppClient() vppClient.cli = v.cli ch, err := v.handler.GetAPI() if err != nil { return err } vppClient.ch = ch vppClient.version, err = binapi.CompatibleVersion(ch) if err != nil { logrus.Warnf("binapi.CompatibleVersion error: %v", err) } version, err := v.GetVersion() if err != nil { logrus.Warnf("GetVersionInfo error: %v", err) } else { logrus.WithField("instance", v.ID()).Debugf("version: %q", version) for v := range binapi.Versions { ver := string(v) if len(ver) > 5 { ver = ver[:5] } logrus.Tracef("checking version %v in %q", ver, version) if strings.Contains(version, ver) { vppClient.version = v logrus.Debugf("found version %v in %q", ver, version) break } } } // register binapi messages to gob package (required for proxy) msgList, ok := binapi.Versions[vppClient.BinapiVersion()] if !ok { return fmt.Errorf("not found version %v", vppClient.BinapiVersion()) } for _, msg := range msgList.AllMessages() { gob.Register(msg) } v.api = ch return nil } func (v *Instance) initStats() error { stats, err := v.handler.GetStats() if err != nil { return err } var sysStats govppapi.SystemStats if err := stats.GetSystemStats(&sysStats); err != nil { return fmt.Errorf("stats unavailable: %v", err) } v.stats = stats return nil }
"go.ligato.io/vpp-probe/vpp/agent" "go.ligato.io/vpp-probe/vpp/api"
worker_test.go
// Copyright 2017 clair authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package clair import ( "path/filepath" "runtime" "testing" "github.com/stretchr/testify/assert" "github.com/quay/clair/v2/database" "github.com/quay/clair/v2/ext/versionfmt/dpkg" "github.com/quay/clair/v2/pkg/commonerr" // Register the required detectors. _ "github.com/quay/clair/v2/ext/featurefmt/dpkg" _ "github.com/quay/clair/v2/ext/featurens/aptsources" _ "github.com/quay/clair/v2/ext/featurens/osrelease" _ "github.com/quay/clair/v2/ext/imagefmt/docker" ) type mockDatastore struct { database.MockDatastore layers map[string]database.Layer } func newMockDatastore() *mockDatastore { return &mockDatastore{ layers: make(map[string]database.Layer), } } func TestProcessWithDistUpgrade(t *testing.T) { _, f, _, _ := runtime.Caller(0) testDataPath := filepath.Join(filepath.Dir(f)) + "/testdata/DistUpgrade/" // Create a mock datastore. datastore := newMockDatastore() datastore.FctInsertLayer = func(layer database.Layer) error { datastore.layers[layer.Name] = layer return nil } datastore.FctFindLayer = func(name string, withFeatures, withVulnerabilities bool) (database.Layer, error) { if layer, exists := datastore.layers[name]; exists { return layer, nil } return database.Layer{}, commonerr.ErrNotFound } // Create the list of FeatureVersions that should not been upgraded from one layer to another. nonUpgradedFeatureVersions := []database.FeatureVersion{ {Feature: database.Feature{Name: "libtext-wrapi18n-perl"}, Version: "0.06-7"}, {Feature: database.Feature{Name: "libtext-charwidth-perl"}, Version: "0.04-7"}, {Feature: database.Feature{Name: "libtext-iconv-perl"}, Version: "1.7-5"}, {Feature: database.Feature{Name: "mawk"}, Version: "1.3.3-17"}, {Feature: database.Feature{Name: "insserv"}, Version: "1.14.0-5"}, {Feature: database.Feature{Name: "db"}, Version: "5.1.29-5"}, {Feature: database.Feature{Name: "ustr"}, Version: "1.0.4-3"}, {Feature: database.Feature{Name: "xz-utils"}, Version: "5.1.1alpha+20120614-2"}, } // Process test layers. // // blank.tar: MAINTAINER Quentin MACHU <quentin.machu.fr> // wheezy.tar: FROM debian:wheezy // jessie.tar: RUN sed -i "s/precise/trusty/" /etc/apt/sources.list && apt-get update && // apt-get -y dist-upgrade assert.Nil(t, ProcessLayer(datastore, "Docker", "blank", "", testDataPath+"blank.tar.gz", nil)) assert.Nil(t, ProcessLayer(datastore, "Docker", "wheezy", "blank", testDataPath+"wheezy.tar.gz", nil)) assert.Nil(t, ProcessLayer(datastore, "Docker", "jessie", "wheezy", testDataPath+"jessie.tar.gz", nil)) // Ensure that the 'wheezy' layer has the expected namespace and features. 
wheezy, ok := datastore.layers["wheezy"] if assert.True(t, ok, "layer 'wheezy' not processed") { assert.Equal(t, "debian:7", wheezy.Namespace.Name) assert.Len(t, wheezy.Features, 52) for _, nufv := range nonUpgradedFeatureVersions { nufv.Feature.Namespace.Name = "debian:7" nufv.Feature.Namespace.VersionFormat = dpkg.ParserName assert.Contains(t, wheezy.Features, nufv) } } // Ensure that the 'wheezy' layer has the expected namespace and non-upgraded features. jessie, ok := datastore.layers["jessie"] if assert.True(t, ok, "layer 'jessie' not processed")
}
{ assert.Equal(t, "debian:8", jessie.Namespace.Name) assert.Len(t, jessie.Features, 74) for _, nufv := range nonUpgradedFeatureVersions { nufv.Feature.Namespace.Name = "debian:7" nufv.Feature.Namespace.VersionFormat = dpkg.ParserName assert.Contains(t, jessie.Features, nufv) } for _, nufv := range nonUpgradedFeatureVersions { nufv.Feature.Namespace.Name = "debian:8" nufv.Feature.Namespace.VersionFormat = dpkg.ParserName assert.NotContains(t, jessie.Features, nufv) } }
tool-lister.ts
/* * Copyright 2017 OICR * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { AfterViewInit, Directive, ElementRef, OnDestroy, ViewChild } from '@angular/core'; import { MatPaginator } from '@angular/material/paginator'; import { MatSort } from '@angular/material/sort'; import { fromEvent, merge, Observable, Subject } from 'rxjs'; import { debounceTime, distinctUntilChanged, takeUntil, tap } from 'rxjs/operators'; import { PublishedToolsDataSource } from '../containers/list/published-tools.datasource'; import { PublishedWorkflowsDataSource } from '../workflows/list/published-workflows.datasource'; import { formInputDebounceTime } from './constants'; import { DateService } from './date.service'; import { EntryType } from './enum/entry-type'; import { ProviderService } from './provider.service'; import { SessionQuery } from './session/session.query'; import { PaginatorService } from './state/paginator.service'; import { DockstoreTool, Workflow } from './swagger'; @Directive() // tslint:disable-next-line: directive-class-suffix export abstract class ToolLister implements AfterViewInit, OnDestroy { private ngUnsubscribe: Subject<{}> = new Subject(); protected previewMode = false; protected displayTable = false; protected publishedTools = []; protected verifiedLink: string; public length$: Observable<number>; public pageSize$: Observable<number>; public pageIndex$: Observable<number>; constructor( private paginatorService: PaginatorService, protected providerService: ProviderService, private dateService: DateService, protected sessionQuery: SessionQuery ) { this.verifiedLink = this.dateService.getVerifiedLink(); } abstract type: 'tool' | 'workflow'; abstract displayedColumns: Array<string>; public dataSource: PublishedWorkflowsDataSource | PublishedToolsDataSource; @ViewChild(MatPaginator, { static: true }) paginator: MatPaginator; @ViewChild(MatSort, { static: true }) sort: MatSort; @ViewChild('input', { static: true }) input: ElementRef; /** * Get whether the entry is considered verified or not * @abstract * @param {((DockstoreTool | Workflow))} entry The entry to get verified status * @returns {boolean} True if entry is verified, false otherwise * @memberof ToolLister */ abstract getVerified(entry: DockstoreTool | Workflow): boolean; ngAfterViewInit() { setTimeout(() => { // Initial load this.loadPublishedEntries(); // Handle paginator changes merge(this.paginator.page) .pipe( distinctUntilChanged(), tap(() => this.loadPublishedEntries()), takeUntil(this.ngUnsubscribe) ) .subscribe(() => this.paginatorService.setPaginator(this.type, this.paginator.pageSize, this.paginator.pageIndex)); // Handle sort changes this.sort.sortChange .pipe( tap(() => { this.paginator.pageIndex = 0; this.loadPublishedEntries(); }), takeUntil(this.ngUnsubscribe) ) .subscribe(); // Handle input text field changes fromEvent(this.input.nativeElement, 'keyup') .pipe(
this.paginator.pageIndex = 0; this.loadPublishedEntries(); }), takeUntil(this.ngUnsubscribe) ) .subscribe(); }); } /** * Loads the published entries (either ExtendedDockstoreTools or ExtendedWorkflows) * from the paginated published entries endpoint * @memberof ToolLister */ loadPublishedEntries() { let direction: 'asc' | 'desc'; switch (this.sort.direction) { case 'asc': { direction = 'asc'; break; } case 'desc': { direction = 'desc'; break; } default: { direction = 'desc'; } } const entryType: EntryType = this.sessionQuery.getValue().entryType; this.dataSource.loadEntries( entryType, this.input.nativeElement.value, direction, this.paginator.pageIndex * this.paginator.pageSize, this.paginator.pageSize, this.sort.active ); } ngOnDestroy() { this.ngUnsubscribe.next(); this.ngUnsubscribe.complete(); } }
debounceTime(formInputDebounceTime), distinctUntilChanged(), tap(() => {
app_src.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use glib::object::ObjectType as ObjectType_; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use glib::StaticType; use glib::Value; use glib_sys; use gobject_sys; use gst; use gst_app_sys; use gst_base; use libc; use std::boxed::Box as Box_; use std::mem::transmute; use AppStreamType; glib_wrapper! { pub struct AppSrc(Object<gst_app_sys::GstAppSrc, gst_app_sys::GstAppSrcClass, AppSrcClass>) @extends gst_base::BaseSrc, gst::Element, gst::Object, @implements gst::URIHandler; match fn { get_type => || gst_app_sys::gst_app_src_get_type(), } } impl AppSrc { pub fn get_caps(&self) -> Option<gst::Caps> { unsafe { from_glib_full(gst_app_sys::gst_app_src_get_caps(self.to_glib_none().0)) } } pub fn get_current_level_bytes(&self) -> u64 { unsafe { gst_app_sys::gst_app_src_get_current_level_bytes(self.to_glib_none().0) } } #[cfg(any(feature = "v1_10", feature = "dox"))] pub fn
(&self) -> gst::ClockTime { unsafe { from_glib(gst_app_sys::gst_app_src_get_duration(self.to_glib_none().0)) } } pub fn get_emit_signals(&self) -> bool { unsafe { from_glib(gst_app_sys::gst_app_src_get_emit_signals( self.to_glib_none().0, )) } } pub fn get_max_bytes(&self) -> u64 { unsafe { gst_app_sys::gst_app_src_get_max_bytes(self.to_glib_none().0) } } pub fn get_size(&self) -> i64 { unsafe { gst_app_sys::gst_app_src_get_size(self.to_glib_none().0) } } pub fn get_stream_type(&self) -> AppStreamType { unsafe { from_glib(gst_app_sys::gst_app_src_get_stream_type( self.to_glib_none().0, )) } } //pub fn set_callbacks(&self, callbacks: /*Ignored*/&mut AppSrcCallbacks, user_data: /*Unimplemented*/Option<Fundamental: Pointer>) { // unsafe { TODO: call gst_app_sys:gst_app_src_set_callbacks() } //} pub fn set_caps(&self, caps: Option<&gst::Caps>) { unsafe { gst_app_sys::gst_app_src_set_caps(self.to_glib_none().0, caps.to_glib_none().0); } } #[cfg(any(feature = "v1_10", feature = "dox"))] pub fn set_duration(&self, duration: gst::ClockTime) { unsafe { gst_app_sys::gst_app_src_set_duration(self.to_glib_none().0, duration.to_glib()); } } pub fn set_emit_signals(&self, emit: bool) { unsafe { gst_app_sys::gst_app_src_set_emit_signals(self.to_glib_none().0, emit.to_glib()); } } pub fn set_max_bytes(&self, max: u64) { unsafe { gst_app_sys::gst_app_src_set_max_bytes(self.to_glib_none().0, max); } } pub fn set_size(&self, size: i64) { unsafe { gst_app_sys::gst_app_src_set_size(self.to_glib_none().0, size); } } pub fn set_stream_type(&self, type_: AppStreamType) { unsafe { gst_app_sys::gst_app_src_set_stream_type(self.to_glib_none().0, type_.to_glib()); } } pub fn get_property_block(&self) -> bool { unsafe { let mut value = Value::from_type(<bool as StaticType>::static_type()); gobject_sys::g_object_get_property( self.as_ptr() as *mut gobject_sys::GObject, b"block\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value.get().unwrap() } } pub fn set_property_block(&self, block: bool) { unsafe { gobject_sys::g_object_set_property( self.as_ptr() as *mut gobject_sys::GObject, b"block\0".as_ptr() as *const _, Value::from(&block).to_glib_none().0, ); } } pub fn get_property_duration(&self) -> u64 { unsafe { let mut value = Value::from_type(<u64 as StaticType>::static_type()); gobject_sys::g_object_get_property( self.as_ptr() as *mut gobject_sys::GObject, b"duration\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value.get().unwrap() } } pub fn set_property_duration(&self, duration: u64) { unsafe { gobject_sys::g_object_set_property( self.as_ptr() as *mut gobject_sys::GObject, b"duration\0".as_ptr() as *const _, Value::from(&duration).to_glib_none().0, ); } } pub fn get_property_format(&self) -> gst::Format { unsafe { let mut value = Value::from_type(<gst::Format as StaticType>::static_type()); gobject_sys::g_object_get_property( self.as_ptr() as *mut gobject_sys::GObject, b"format\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value.get().unwrap() } } pub fn set_property_format(&self, format: gst::Format) { unsafe { gobject_sys::g_object_set_property( self.as_ptr() as *mut gobject_sys::GObject, b"format\0".as_ptr() as *const _, Value::from(&format).to_glib_none().0, ); } } pub fn get_property_is_live(&self) -> bool { unsafe { let mut value = Value::from_type(<bool as StaticType>::static_type()); gobject_sys::g_object_get_property( self.as_ptr() as *mut gobject_sys::GObject, b"is-live\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value.get().unwrap() } } pub fn 
set_property_is_live(&self, is_live: bool) { unsafe { gobject_sys::g_object_set_property( self.as_ptr() as *mut gobject_sys::GObject, b"is-live\0".as_ptr() as *const _, Value::from(&is_live).to_glib_none().0, ); } } pub fn get_property_max_latency(&self) -> i64 { unsafe { let mut value = Value::from_type(<i64 as StaticType>::static_type()); gobject_sys::g_object_get_property( self.as_ptr() as *mut gobject_sys::GObject, b"max-latency\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value.get().unwrap() } } pub fn set_property_max_latency(&self, max_latency: i64) { unsafe { gobject_sys::g_object_set_property( self.as_ptr() as *mut gobject_sys::GObject, b"max-latency\0".as_ptr() as *const _, Value::from(&max_latency).to_glib_none().0, ); } } pub fn get_property_min_latency(&self) -> i64 { unsafe { let mut value = Value::from_type(<i64 as StaticType>::static_type()); gobject_sys::g_object_get_property( self.as_ptr() as *mut gobject_sys::GObject, b"min-latency\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value.get().unwrap() } } pub fn set_property_min_latency(&self, min_latency: i64) { unsafe { gobject_sys::g_object_set_property( self.as_ptr() as *mut gobject_sys::GObject, b"min-latency\0".as_ptr() as *const _, Value::from(&min_latency).to_glib_none().0, ); } } pub fn get_property_min_percent(&self) -> u32 { unsafe { let mut value = Value::from_type(<u32 as StaticType>::static_type()); gobject_sys::g_object_get_property( self.as_ptr() as *mut gobject_sys::GObject, b"min-percent\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value.get().unwrap() } } pub fn set_property_min_percent(&self, min_percent: u32) { unsafe { gobject_sys::g_object_set_property( self.as_ptr() as *mut gobject_sys::GObject, b"min-percent\0".as_ptr() as *const _, Value::from(&min_percent).to_glib_none().0, ); } } pub fn connect_enough_data<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn enough_data_trampoline<F: Fn(&AppSrc) + Send + Sync + 'static>( this: *mut gst_app_sys::GstAppSrc, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"enough-data\0".as_ptr() as *const _, Some(transmute(enough_data_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_need_data<F: Fn(&AppSrc, u32) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn need_data_trampoline<F: Fn(&AppSrc, u32) + Send + Sync + 'static>( this: *mut gst_app_sys::GstAppSrc, length: libc::c_uint, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this), length) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"need-data\0".as_ptr() as *const _, Some(transmute(need_data_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_seek_data<F: Fn(&AppSrc, u64) -> bool + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn seek_data_trampoline< F: Fn(&AppSrc, u64) -> bool + Send + Sync + 'static, >( this: *mut gst_app_sys::GstAppSrc, offset: u64, f: glib_sys::gpointer, ) -> glib_sys::gboolean { let f: &F = &*(f as *const F); f(&from_glib_borrow(this), offset).to_glib() } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"seek-data\0".as_ptr() as *const _, Some(transmute(seek_data_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_block_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: 
F, ) -> SignalHandlerId { unsafe extern "C" fn notify_block_trampoline<F: Fn(&AppSrc) + Send + Sync + 'static>( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::block\0".as_ptr() as *const _, Some(transmute(notify_block_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_caps_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_caps_trampoline<F: Fn(&AppSrc) + Send + Sync + 'static>( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::caps\0".as_ptr() as *const _, Some(transmute(notify_caps_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_current_level_bytes_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_current_level_bytes_trampoline< F: Fn(&AppSrc) + Send + Sync + 'static, >( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::current-level-bytes\0".as_ptr() as *const _, Some(transmute( notify_current_level_bytes_trampoline::<F> as usize, )), Box_::into_raw(f), ) } } pub fn connect_property_duration_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_duration_trampoline<F: Fn(&AppSrc) + Send + Sync + 'static>( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::duration\0".as_ptr() as *const _, Some(transmute(notify_duration_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_emit_signals_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_emit_signals_trampoline< F: Fn(&AppSrc) + Send + Sync + 'static, >( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::emit-signals\0".as_ptr() as *const _, Some(transmute(notify_emit_signals_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_format_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_format_trampoline<F: Fn(&AppSrc) + Send + Sync + 'static>( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::format\0".as_ptr() as *const _, Some(transmute(notify_format_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_is_live_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_is_live_trampoline<F: Fn(&AppSrc) + Send + Sync + 'static>( 
this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::is-live\0".as_ptr() as *const _, Some(transmute(notify_is_live_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_max_bytes_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_max_bytes_trampoline<F: Fn(&AppSrc) + Send + Sync + 'static>( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::max-bytes\0".as_ptr() as *const _, Some(transmute(notify_max_bytes_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_max_latency_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_max_latency_trampoline< F: Fn(&AppSrc) + Send + Sync + 'static, >( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::max-latency\0".as_ptr() as *const _, Some(transmute(notify_max_latency_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_min_latency_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_min_latency_trampoline< F: Fn(&AppSrc) + Send + Sync + 'static, >( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::min-latency\0".as_ptr() as *const _, Some(transmute(notify_min_latency_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_min_percent_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_min_percent_trampoline< F: Fn(&AppSrc) + Send + Sync + 'static, >( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::min-percent\0".as_ptr() as *const _, Some(transmute(notify_min_percent_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_size_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_size_trampoline<F: Fn(&AppSrc) + Send + Sync + 'static>( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::size\0".as_ptr() as *const _, Some(transmute(notify_size_trampoline::<F> as usize)), Box_::into_raw(f), ) } } pub fn connect_property_stream_type_notify<F: Fn(&AppSrc) + Send + Sync + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_stream_type_trampoline< F: Fn(&AppSrc) + Send + Sync + 'static, >( this: *mut gst_app_sys::GstAppSrc, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) { let f: &F = &*(f 
as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::stream-type\0".as_ptr() as *const _, Some(transmute(notify_stream_type_trampoline::<F> as usize)), Box_::into_raw(f), ) } } } unsafe impl Send for AppSrc {} unsafe impl Sync for AppSrc {}
get_duration
index.js
import { __decorate, __metadata, __param } from "tslib"; import { splitSignature } from '@ethersproject/bytes'; import BaseService from '../commons/BaseService'; import { eEthereumTxType, } from '../commons/types'; import { canBeEnsAddress } from '../commons/utils'; import { GovDelegationValidator } from '../commons/validators/methodValidators'; import { is0OrPositiveAmount, isEthAddress, isEthAddressOrENS, isPositiveAmount, } from '../commons/validators/paramValidators'; import { IGovernancePowerDelegationToken__factory } from './typechain/IGovernancePowerDelegationToken__factory'; export class GovernancePowerDelegationTokenService extends BaseService { constructor(provider) { super(provider, IGovernancePowerDelegationToken__factory); } async delegate({ user, delegatee, governanceToken }) { const txs = []; const governanceDelegationToken = this.getContractInstance(governanceToken); const delegateeAddress = await this.getDelegateeAddress(delegatee); const txCallback = this.generateTxCallback({ rawTxMethod: async () => governanceDelegationToken.populateTransaction.delegate(delegateeAddress), from: user, }); txs.push({ tx: txCallback, txType: eEthereumTxType.GOV_DELEGATION_ACTION, gas: this.generateTxPriceEstimation(txs, txCallback), }); return txs; } async delegateByType({ user, delegatee, delegationType, governanceToken }) { const txs = []; const governanceDelegationToken = this.getContractInstance(governanceToken); const delegateeAddress = await this.getDelegateeAddress(delegatee); const txCallback = this.generateTxCallback({ rawTxMethod: async () => governanceDelegationToken.populateTransaction.delegateByType(delegateeAddress, delegationType), from: user, }); txs.push({ tx: txCallback, txType: eEthereumTxType.GOV_DELEGATION_ACTION, gas: this.generateTxPriceEstimation(txs, txCallback), }); return txs; } async delegateBySig({ user, delegatee, expiry, signature, governanceToken }) { const txs = []; const governanceDelegationToken = this.getContractInstance(governanceToken); const nonce = await this.getNonce({ user, governanceToken }); const { v, r, s } = splitSignature(signature); const delegateeAddress = await this.getDelegateeAddress(delegatee); const txCallback = this.generateTxCallback({ rawTxMethod: async () => governanceDelegationToken.populateTransaction.delegateBySig(delegateeAddress, nonce, expiry, v, r, s), from: user, }); txs.push({ tx: txCallback, txType: eEthereumTxType.GOV_DELEGATION_ACTION, gas: this.generateTxPriceEstimation(txs, txCallback), }); return txs; } async delegateByTypeBySig({ user, delegatee, delegationType, expiry, signature, governanceToken, }) { const txs = []; const governanceDelegationToken = this.getContractInstance(governanceToken); const nonce = await this.getNonce({ user, governanceToken }); const { v, r, s } = splitSignature(signature); const delegateeAddress = await this.getDelegateeAddress(delegatee); const txCallback = this.generateTxCallback({ rawTxMethod: async () => governanceDelegationToken.populateTransaction.delegateByTypeBySig(delegateeAddress, delegationType, nonce, expiry, v, r, s), from: user, }); txs.push({ tx: txCallback, txType: eEthereumTxType.GOV_DELEGATION_ACTION, gas: this.generateTxPriceEstimation(txs, txCallback), }); return txs; } async prepareDelegateSignature({ delegatee, nonce, expiry, governanceTokenName, governanceToken, }) { const delegateeAddress = await this.getDelegateeAddress(delegatee); const { chainId } = await this.provider.getNetwork(); const typeData = { types: { EIP712Domain: [ { name: 'name', type: 'string' }, { 
name: 'chainId', type: 'uint256' }, { name: 'verifyingContract', type: 'address' }, ], Delegate: [ { name: 'delegatee', type: 'address' }, { name: 'nonce', type: 'uint256' }, { name: 'expiry', type: 'uint256' }, ], }, primaryType: 'Delegate', domain: { name: governanceTokenName, chainId, verifyingContract: governanceToken, }, message: { delegatee: delegateeAddress, nonce, expiry, }, }; return JSON.stringify(typeData); } async prepareDelegateByTypeSignature({ delegatee, type, nonce, expiry, governanceTokenName, governanceToken, }) { const delegateeAddress = await this.getDelegateeAddress(delegatee); const { chainId } = await this.provider.getNetwork(); const typeData = { types: { EIP712Domain: [ { name: 'name', type: 'string' }, { name: 'chainId', type: 'uint256' }, { name: 'verifyingContract', type: 'address' }, ], DelegateByType: [ { name: 'delegatee', type: 'address' }, { name: 'type', type: 'uint256' }, { name: 'nonce', type: 'uint256' }, { name: 'expiry', type: 'uint256' }, ], }, primaryType: 'DelegateByType', domain: { name: governanceTokenName, chainId, verifyingContract: governanceToken, }, message: { delegatee: delegateeAddress, type, nonce, expiry, }, }; return JSON.stringify(typeData); } async getDelegateeByType({ delegator, delegationType, governanceToken }) { const governanceDelegationToken = this.getContractInstance(governanceToken); return governanceDelegationToken.getDelegateeByType(delegator, delegationType); } async getPowerCurrent({ user, delegationType, governanceToken }) { const governanceDelegationToken = this.getContractInstance(governanceToken); return (await governanceDelegationToken.getPowerCurrent(user, delegationType)).toString(); } async getPowerAtBlock({ user, blockNumber, delegationType, governanceToken }) { const governanceDelegationToken = this.getContractInstance(governanceToken); return (await governanceDelegationToken.getPowerAtBlock(user, blockNumber, delegationType)).toString(); } async getNonce({ user, governanceToken }) { const governanceDelegationToken = this.getContractInstance(governanceToken); return (await governanceDelegationToken._nonces(user)).toString(); } async getDelegateeAddress(delegatee) { if (canBeEnsAddress(delegatee)) { const delegateeAddress = await this.provider.resolveName(delegatee); if (!delegateeAddress) throw new Error(`Address: ${delegatee} is not a valid ENS address`); return delegateeAddress; } return delegatee; } } __decorate([ GovDelegationValidator, __param(0, isEthAddress('user')), __param(0, isEthAddressOrENS('delegatee')), __param(0, isEthAddress('governanceToken')), __metadata("design:type", Function), __metadata("design:paramtypes", [Object]), __metadata("design:returntype", Promise) ], GovernancePowerDelegationTokenService.prototype, "delegate", null); __decorate([ GovDelegationValidator, __param(0, isEthAddress('user')), __param(0, isEthAddressOrENS('delegatee')), __param(0, isEthAddress('governanceToken')), __metadata("design:type", Function), __metadata("design:paramtypes", [Object]), __metadata("design:returntype", Promise) ], GovernancePowerDelegationTokenService.prototype, "delegateByType", null); __decorate([ GovDelegationValidator, __param(0, isEthAddress('user')), __param(0, isEthAddressOrENS('delegatee')), __param(0, isEthAddress('governanceToken')), __metadata("design:type", Function), __metadata("design:paramtypes", [Object]), __metadata("design:returntype", Promise) ], GovernancePowerDelegationTokenService.prototype, "delegateBySig", null); __decorate([ GovDelegationValidator, __param(0, 
isEthAddress('user')), __param(0, isEthAddressOrENS('delegatee')), __param(0, isEthAddress('governanceToken')), __metadata("design:type", Function), __metadata("design:paramtypes", [Object]), __metadata("design:returntype", Promise) ], GovernancePowerDelegationTokenService.prototype, "delegateByTypeBySig", null);
GovDelegationValidator, __param(0, isEthAddressOrENS('delegatee')), __param(0, isEthAddress('governanceToken')), __param(0, is0OrPositiveAmount('nonce')), __metadata("design:type", Function), __metadata("design:paramtypes", [Object]), __metadata("design:returntype", Promise) ], GovernancePowerDelegationTokenService.prototype, "prepareDelegateSignature", null); __decorate([ GovDelegationValidator, __param(0, isEthAddressOrENS('delegatee')), __param(0, isEthAddress('governanceToken')), __param(0, is0OrPositiveAmount('nonce')), __metadata("design:type", Function), __metadata("design:paramtypes", [Object]), __metadata("design:returntype", Promise) ], GovernancePowerDelegationTokenService.prototype, "prepareDelegateByTypeSignature", null); __decorate([ GovDelegationValidator, __param(0, isEthAddress('delegator')), __param(0, isEthAddress('governanceToken')), __metadata("design:type", Function), __metadata("design:paramtypes", [Object]), __metadata("design:returntype", Promise) ], GovernancePowerDelegationTokenService.prototype, "getDelegateeByType", null); __decorate([ GovDelegationValidator, __param(0, isEthAddress('user')), __param(0, isEthAddress('governanceToken')), __metadata("design:type", Function), __metadata("design:paramtypes", [Object]), __metadata("design:returntype", Promise) ], GovernancePowerDelegationTokenService.prototype, "getPowerCurrent", null); __decorate([ GovDelegationValidator, __param(0, isEthAddress('user')), __param(0, isEthAddress('governanceToken')), __param(0, isPositiveAmount('blockNumber')), __metadata("design:type", Function), __metadata("design:paramtypes", [Object]), __metadata("design:returntype", Promise) ], GovernancePowerDelegationTokenService.prototype, "getPowerAtBlock", null); __decorate([ GovDelegationValidator, __param(0, isEthAddress('user')), __param(0, isEthAddress('governanceToken')), __metadata("design:type", Function), __metadata("design:paramtypes", [Object]), __metadata("design:returntype", Promise) ], GovernancePowerDelegationTokenService.prototype, "getNonce", null); //# sourceMappingURL=index.js.map
__decorate([
logs_message_remapper.py
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2019-Present Datadog, Inc. import re # noqa: F401 import sys # noqa: F401 from datadog_api_client.v1.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) def lazy_import(): from datadog_api_client.v1.model.logs_message_remapper_type import LogsMessageRemapperType globals()["LogsMessageRemapperType"] = LogsMessageRemapperType class LogsMessageRemapper(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator
validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = {} validations = {} additional_properties_type = None _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { "sources": ([str],), # noqa: E501 "type": (LogsMessageRemapperType,), # noqa: E501 "is_enabled": (bool,), # noqa: E501 "name": (str,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { "sources": "sources", # noqa: E501 "type": "type", # noqa: E501 "is_enabled": "is_enabled", # noqa: E501 "name": "name", # noqa: E501 } _composed_schemas = {} required_properties = set( [ "_data_store", "_check_type", "_spec_property_naming", "_path_to_item", "_configuration", "_visited_composed_classes", ] ) @convert_js_args_to_python_args def __init__(self, type, *args, **kwargs): # noqa: E501 """LogsMessageRemapper - a model defined in OpenAPI Args: type (LogsMessageRemapperType): Keyword Args: sources ([str]): Array of source attributes.. defaults to ["msg"] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) is_enabled (bool): Whether or not the processor is enabled.. [optional] if omitted the server will use the default value of False # noqa: E501 name (str): Name of the processor.. [optional] # noqa: E501 """ sources = kwargs.get("sources", ["msg"]) _check_type = kwargs.pop("_check_type", True) _spec_property_naming = kwargs.pop("_spec_property_naming", False) _path_to_item = kwargs.pop("_path_to_item", ()) _configuration = kwargs.pop("_configuration", None) _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.sources = sources self.type = type for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys and self.additional_properties_type is None ): # discard variable. continue setattr(self, var_name, var_value)
variable value to the discriminator class name.
solution.py
def sol():
if __name__ == "__main__": sol()
a, b = 0, 1 for i in range(int(input())): a, b = b, a + b print(a)
Solution.py
""" You are given coins of different denominations and a total amount of money amount. Write a function to compute the fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any combination of the coins, return -1. You may assume that you have an infinite number of each kind of coin.   Example 1: Input: coins = [1,2,5], amount = 11 Output: 3 Explanation: 11 = 5 + 5 + 1 Example 2: Input: coins = [2], amount = 3 Output: -1 Example 3: Input: coins = [1], amount = 0 Output: 0 Example 4: Input: coins = [1], amount = 1 Output: 1 Example 5: Input: coins = [1], amount = 2 Output: 2   Constraints: 1 <= coins.length <= 12 1 <= coins[i] <= 231 - 1 0 <= amount <= 104 """ import collections class So
def coinChange(self, coins: List[int], amount: int) -> int: ### method 1: BFS, model it as a graph problem # queue = collections.deque() # queue.append((0, 0)) # visited = set() # while queue: # curr, step = queue.popleft() # if curr == amount: # return step # if curr in visited or curr > amount: # continue # visited.add(curr) # for coin in coins: # neighbor = curr + coin # if neighbor in visited: # continue # queue.append((neighbor, step + 1)) # return -1 ### method 2: dp dp = [0] + [None] * amount for i in range(1, amount + 1): candidates = list(filter(lambda x: x is not None, [dp[i - c] if i - c >= 0 else None for c in coins])) dp[i] = min(candidates) + 1 if candidates else None return dp[amount] if dp[amount] is not None else -1
lution:
index.js
import React from 'react'; import { parse } from 'react-docgen'; import CodeExample from '../../../components/CodeExample'; import ComponentHeader from '../../../components/ComponentHeader'; import PropTypeDescription from '../../../components/PropTypeDescription'; import Demo from './Demo'; // eslint-disable-next-line import demoCode from '!raw-loader!./Demo'; // eslint-disable-next-line import componentCode from '!raw-loader!@ringcentral-integration/widgets/components/SettingsPanel'; const SettingsPanelPage = () => { const info = parse(componentCode); return ( <div> <ComponentHeader name="SettingsPanel" description={info.description} /> <CodeExample code={demoCode} title="SettingsPanel Example"> <Demo /> </CodeExample> <PropTypeDescription componentInfo={info} /> </div>
}; export default SettingsPanelPage;
);
binaryOccupancyGrid2D_env.py
import numpy as np from gennav.envs.base import Environment from gennav.utils.common import RobotState from gennav.utils.geometry import Point from matplotlib import pyplot as plt class BinaryOccupancyGrid2DEnv(Environment): """Base class for a Binary Occupancy Grid 2D envrionment. Arguments: X (unsigned int) : The number of grid cells in the x-direction Y (unsigned int) : the number of grid cells in the y-direction """ def __init__(self, X=10, Y=10): super(BinaryOccupancyGrid2DEnv, self).__init__() self.X = X self.Y = Y self.scan = None self.robotPose = None self.scale = 5 self.grid = np.zeros((self.X * self.scale, self.Y * self.scale)) # Storing transforms self.transforms = {} self.mapTbot = { "from": "map", "to": "bot", "transform": self.scale * np.array( [[1, 0, int(self.X / 2)], [0, 1, int(self.Y / 2)], [0, 0, 1]] ).reshape(3, 3), } self.botTworld = {"from": "bot", "to": "world", "transform": np.empty((3, 3))} self.mapTworld = { "from": "map", "to": "world", "transform": np.dot(self.mapTbot["transform"], self.botTworld["transform"]), } self.transforms["mapTbot"] = self.mapTbot self.transforms["botTworld"] = self.botTworld self.transforms["mapTworld"] = self.mapTworld def update(self, scan, robotPose):
def fillOccupancy(self): """Function that fill the occupnacy grid on every update Assumptions: 1. RobotPose is considered (0, 0, 0) to accomodate the laser scan, which produces ranges wrt to the bot 2. The RobotPose in the occupancy grid is (X * scale_factor/2, Y * scale_factor /2, 0) 3. The attribute robotPose is the real pose of the robot wrt to the world Frame, thus it helps us to calculate the transform for trajectory and pose validity queries """ self.grid[:] = 0 ang_min, ang_max, ranges = self.scan angle_step = (ang_max - ang_min) / len(ranges) for i, rng in enumerate(ranges): # Check for obstacles if np.abs(rng) is not np.inf: x, y = ( rng * np.cos(ang_min + i * angle_step), rng * np.sin(ang_max + i * angle_step), ) newState = self.transform("bot", "map", RobotState(Point(x, y, 0))) x_, y_ = newState.position.x, newState.position.y # Checking if the range is within the grid, to mark them as occupied if 0 <= x_ < self.grid.shape[0] and 0 <= y_ < self.grid.shape[1]: if self.grid[int(x_)][int(-y_ - 1)] != 1: self.grid[int(x_)][int(-y_ - 1)] = 1 def get_status(self, state): """Get whether a given state is valid within the environment. Method for checking the validity of a given RobotPose in the environment. Args: state (gennav.utils.RobotState): State to be checked Returns: bool: True if state is valid otherwise False """ state = self.transform("world", "map", state) x, y = state.position.x, state.position.y if self.grid[x][-y - 1] == 1: return False else: return True def get_traj_status(self, traj): """Get whether a given trajectory is valid within the environment. Method for checking the validity of a trajectory in the given environment. Args: state (gennav.utils.Trajectory): Trajectory to be checked Returns: bool: True if state is valid otherwise False """ collision = False for i in range(len(traj.path) - 1): collision = self.check_line_segment( self.transform("world", "map", traj.path[i]), self.transform("world", "map", traj.path[i + 1]), ) if collision: break return not collision def transform(self, frame1, frame2, rsf1): """Transform robotPose from one pose to the other Args: frame1 (string) : from the frame (world, bot, map) frame2 (string) : to the frame (world, bot, map) rsf1 (gennav.utils.common.RobotState) : RobotState in frame1 Returns: rsf2 (gennav.utils.common.RobotState) : RobotState in frame2 """ # TODO: Make it more robust in terms of checking frames # Check if the required trnasform or the inverse of the transform exists frame = frame2 + "T" + frame1 frame_inv = frame1 + "T" + frame2 if frame in self.transforms.keys(): t_matrix = self.transforms[frame]["transform"] elif frame_inv in self.transforms.keys(): t_matrix = np.linalg.inv(self.transforms[frame_inv]["transform"]) else: raise Exception("Transform for the frames not found") # Transform using matrix multiplication pf2 = np.dot( t_matrix, np.array([rsf1.position.x, rsf1.position.y, 1]).reshape(3, 1) ) rsf2 = RobotState(position=Point(pf2[0].item(), pf2[1].item())) # Return RobotState return rsf2 def compute_transforms(self): """Computes transforms between frames Uses robot pose to compute transform between the world frame and the bot frame """ x, y, yaw = ( self.robotPose.position.x, self.robotPose.position.y, self.robotPose.orientation.yaw, ) worldTbot = np.array( [[np.cos(yaw), -np.sin(yaw), x], [np.sin(yaw), np.cos(yaw), y], [0, 0, 1]] ).reshape(3, 3) self.botTworld["transform"] = np.linalg.inv(worldTbot) self.mapTworld["transform"] = np.dot( self.mapTbot["transform"], self.botTworld["transform"] ) def 
visualise_grid(self): """ Helper function to visualise grid """ plt.imshow(self.grid, origin="bottom", cmap="binary") plt.show() def check_line_segment(self, state1, state2): """Checks whether a line segment is collision free in the environent Computes a line segment from the start point to the end point and parametrically checks if the grid cells they occupy are occupied. Args: state1 (gennav.utils.common.RobotState) : One end point state2 (gennav.utils.common.RobotState) : The other end point """ point1 = state1.position point2 = state2.position x1, y1 = point1.x, point1.y x2, y2 = point2.x, point2.y m = (y2 - y1) / (x2 - x1) collision = False for x in np.arange(x1, x2, 0.5): y = m * x - m * x1 + y1 if self.grid[int(x)][int(-y - 1)] == 1: collision = True break return collision
"""Function to update the environment Args: scan (list) : List of ang_min, ang_max, ranges robotPose (gennav.utils.RobotPose) : Current RobotPose """ self.scan = scan self.robotPose = robotPose self.compute_transforms() self.fillOccupancy()
functions.rs
use crate::utils::{ attr_by_name, attrs::is_proc_macro, is_must_use_ty, is_trait_impl_item, is_type_diagnostic_item, iter_input_pats, last_path_segment, match_def_path, must_use_attr, path_to_local, return_ty, snippet, snippet_opt, span_lint, span_lint_and_help, span_lint_and_then, trait_ref_of_method, type_is_unsafe_function, }; use if_chain::if_chain; use rustc_ast::ast::Attribute; use rustc_data_structures::fx::FxHashSet; use rustc_errors::Applicability; use rustc_hir as hir; use rustc_hir::intravisit; use rustc_hir::{def::Res, def_id::DefId, QPath}; use rustc_lint::{LateContext, LateLintPass, LintContext}; use rustc_middle::hir::map::Map; use rustc_middle::lint::in_external_macro; use rustc_middle::ty::{self, Ty}; use rustc_session::{declare_tool_lint, impl_lint_pass}; use rustc_span::source_map::Span; use rustc_span::sym; use rustc_target::spec::abi::Abi; use rustc_typeck::hir_ty_to_ty; declare_clippy_lint! { /// **What it does:** Checks for functions with too many parameters. /// /// **Why is this bad?** Functions with lots of parameters are considered bad /// style and reduce readability (“what does the 5th parameter mean?”). Consider /// grouping some parameters into a new type. /// /// **Known problems:** None. /// /// **Example:** /// ```rust /// # struct Color; /// fn foo(x: u32, y: u32, name: &str, c: Color, w: f32, h: f32, a: f32, b: f32) { /// // .. /// } /// ``` pub TOO_MANY_ARGUMENTS, complexity, "functions with too many arguments" } declare_clippy_lint! { /// **What it does:** Checks for functions with a large amount of lines. /// /// **Why is this bad?** Functions with a lot of lines are harder to understand /// due to having to look at a larger amount of code to understand what the /// function is doing. Consider splitting the body of the function into /// multiple functions. /// /// **Known problems:** None. /// /// **Example:** /// ```rust /// fn im_too_long() { /// println!(""); /// // ... 100 more LoC /// println!(""); /// } /// ``` pub TOO_MANY_LINES, pedantic, "functions with too many lines" } declare_clippy_lint! { /// **What it does:** Checks for public functions that dereference raw pointer /// arguments but are not marked unsafe. /// /// **Why is this bad?** The function should probably be marked `unsafe`, since /// for an arbitrary raw pointer, there is no way of telling for sure if it is /// valid. /// /// **Known problems:** /// /// * It does not check functions recursively so if the pointer is passed to a /// private non-`unsafe` function which does the dereferencing, the lint won't /// trigger. /// * It only checks for arguments whose type are raw pointers, not raw pointers /// got from an argument in some other way (`fn foo(bar: &[*const u8])` or /// `some_argument.get_raw_ptr()`). /// /// **Example:** /// ```rust,ignore /// // Bad /// pub fn foo(x: *const u8) { /// println!("{}", unsafe { *x }); /// } /// /// // Good /// pub unsafe fn foo(x: *const u8) { /// println!("{}", unsafe { *x }); /// } /// ``` pub NOT_UNSAFE_PTR_ARG_DEREF, correctness, "public functions dereferencing raw pointer arguments but not marked `unsafe`" } declare_clippy_lint! { /// **What it does:** Checks for a [`#[must_use]`] attribute on /// unit-returning functions and methods. /// /// [`#[must_use]`]: https://doc.rust-lang.org/reference/attributes/diagnostics.html#the-must_use-attribute /// /// **Why is this bad?** Unit values are useless. The attribute is likely /// a remnant of a refactoring that removed the return type. /// /// **Known problems:** None. 
/// /// **Examples:** /// ```rust /// #[must_use] /// fn useless() { } /// ``` pub MUST_USE_UNIT, style, "`#[must_use]` attribute on a unit-returning function / method" } declare_clippy_lint! { /// **What it does:** Checks for a [`#[must_use]`] attribute without /// further information on functions and methods that return a type already /// marked as `#[must_use]`. /// /// [`#[must_use]`]: https://doc.rust-lang.org/reference/attributes/diagnostics.html#the-must_use-attribute /// /// **Why is this bad?** The attribute isn't needed. Not using the result /// will already be reported. Alternatively, one can add some text to the /// attribute to improve the lint message. /// /// **Known problems:** None. /// /// **Examples:** /// ```rust /// #[must_use] /// fn double_must_use() -> Result<(), ()> { /// unimplemented!(); /// } /// ``` pub DOUBLE_MUST_USE, style, "`#[must_use]` attribute on a `#[must_use]`-returning function / method" } declare_clippy_lint! { /// **What it does:** Checks for public functions that have no /// [`#[must_use]`] attribute, but return something not already marked /// must-use, have no mutable arg and mutate no statics. /// /// [`#[must_use]`]: https://doc.rust-lang.org/reference/attributes/diagnostics.html#the-must_use-attribute /// /// **Why is this bad?** Not bad at all, this lint just shows places where /// you could add the attribute. /// /// **Known problems:** The lint only checks the arguments for mutable /// types without looking if they are actually changed. On the other hand, /// it also ignores a broad range of potentially interesting side effects, /// because we cannot decide whether the programmer intends the function to /// be called for the side effect or the result. Expect many false /// positives. At least we don't lint if the result type is unit or already /// `#[must_use]`. /// /// **Examples:** /// ```rust /// // this could be annotated with `#[must_use]`. /// fn id<T>(t: T) -> T { t } /// ``` pub MUST_USE_CANDIDATE, pedantic, "function or method that could take a `#[must_use]` attribute" } declare_clippy_lint! { /// **What it does:** Checks for public functions that return a `Result` /// with an `Err` type of `()`. It suggests using a custom type that /// implements [`std::error::Error`]. /// /// **Why is this bad?** Unit does not implement `Error` and carries no /// further information about what went wrong. /// /// **Known problems:** Of course, this lint assumes that `Result` is used /// for a fallible operation (which is after all the intended use). However /// code may opt to (mis)use it as a basic two-variant-enum. In that case, /// the suggestion is misguided, and the code should use a custom enum /// instead. /// /// **Examples:** /// ```rust /// pub fn read_u8() -> Result<u8, ()> { Err(()) } /// ``` /// should become /// ```rust,should_panic /// use std::fmt; /// /// #[derive(Debug)] /// pub struct EndOfStream; /// /// impl fmt::Display for EndOfStream { /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { /// write!(f, "End of Stream") /// } /// } /// /// impl std::error::Error for EndOfStream { } /// /// pub fn read_u8() -> Result<u8, EndOfStream> { Err(EndOfStream) } ///# fn main() { ///# read_u8().unwrap(); ///# } /// ``` /// /// Note that there are crates that simplify creating the error type, e.g. /// [`thiserror`](https://docs.rs/thiserror). 
pub RESULT_UNIT_ERR, style, "public function returning `Result` with an `Err` type of `()`" } #[derive(Copy, Clone)] pub struct Functions { threshold: u64, max_lines: u64, } impl Functions { pub fn new(threshold: u64, max_lines: u64) -> Self { Self { threshold, max_lines } } } impl_lint_pass!(Functions => [ TOO_MANY_ARGUMENTS, TOO_MANY_LINES, NOT_UNSAFE_PTR_ARG_DEREF, MUST_USE_UNIT, DOUBLE_MUST_USE, MUST_USE_CANDIDATE, RESULT_UNIT_ERR, ]); impl<'tcx> LateLintPass<'tcx> for Functions { fn check_fn( &mut self, cx: &LateContext<'tcx>, kind: intravisit::FnKind<'tcx>, decl: &'tcx hir::FnDecl<'_>, body: &'tcx hir::Body<'_>, span: Span, hir_id: hir::HirId, ) { let unsafety = match kind { intravisit::FnKind::ItemFn(_, _, hir::FnHeader { unsafety, .. }, _, _) => unsafety, intravisit::FnKind::Method(_, sig, _, _) => sig.header.unsafety, intravisit::FnKind::Closure(_) => return, }; // don't warn for implementations, it's not their fault if !is_trait_impl_item(cx, hir_id) { // don't lint extern functions decls, it's not their fault either match kind { intravisit::FnKind::Method( _, &hir::FnSig { header: hir::FnHeader { abi: Abi::Rust, .. }, .. }, _, _, ) | intravisit::FnKind::ItemFn(_, _, hir::FnHeader { abi: Abi::Rust, .. }, _, _) => { self.check_arg_number(cx, decl, span.with_hi(decl.output.span().hi())) }, _ => {}, } } Self::check_raw_ptr(cx, unsafety, decl, body, hir_id); self.check_line_number(cx, span, body); } fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) { let attr = must_use_attr(&item.attrs); if let hir::ItemKind::Fn(ref sig, ref _generics, ref body_id) = item.kind { let is_public = cx.access_levels.is_exported(item.hir_id()); let fn_header_span = item.span.with_hi(sig.decl.output.span().hi()); if is_public { check_result_unit_err(cx, &sig.decl, item.span, fn_header_span); } if let Some(attr) = attr { check_needless_must_use(cx, &sig.decl, item.hir_id(), item.span, fn_header_span, attr); return; } if is_public && !is_proc_macro(cx.sess(), &item.attrs) && attr_by_name(&item.attrs, "no_mangle").is_none() { check_must_use_candidate( cx, &sig.decl, cx.tcx.hir().body(*body_id), item.span, item.hir_id(), item.span.with_hi(sig.decl.output.span().hi()), "this function could have a `#[must_use]` attribute", ); } } } fn check_impl_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::ImplItem<'_>) { if let hir::ImplItemKind::Fn(ref sig, ref body_id) = item.kind { let is_public = cx.access_levels.is_exported(item.hir_id()); let fn_header_span = item.span.with_hi(sig.decl.output.span().hi()); if is_public && trait_ref_of_method(cx, item.hir_id()).is_none() { check_result_unit_err(cx, &sig.decl, item.span, fn_header_span); } let attr = must_use_attr(&item.attrs); if let Some(attr) = attr { check_needless_must_use(cx, &sig.decl, item.hir_id(), item.span, fn_header_span, attr); } else if is_public && !is_proc_macro(cx.sess(), &item.attrs) && trait_ref_of_method(cx, item.hir_id()).is_none() { check_must_use_candidate( cx, &sig.decl, cx.tcx.hir().body(*body_id), item.span, item.hir_id(), item.span.with_hi(sig.decl.output.span().hi()), "this method could have a `#[must_use]` attribute", ); } } } fn check_trait_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::TraitItem<'_>) { if let hir::TraitItemKind::Fn(ref sig, ref eid) = item.kind { // don't lint extern functions decls, it's not their fault if sig.header.abi == Abi::Rust { self.check_arg_number(cx, &sig.decl, item.span.with_hi(sig.decl.output.span().hi())); } let is_public = 
cx.access_levels.is_exported(item.hir_id()); let fn_header_span = item.span.with_hi(sig.decl.output.span().hi()); if is_public { check_result_unit_err(cx, &sig.decl, item.span, fn_header_span); } let attr = must_use_attr(&item.attrs); if let Some(attr) = attr { check_needless_must_use(cx, &sig.decl, item.hir_id(), item.span, fn_header_span, attr); } if let hir::TraitFn::Provided(eid) = *eid { let body = cx.tcx.hir().body(eid); Self::check_raw_ptr(cx, sig.header.unsafety, &sig.decl, body, item.hir_id()); if attr.is_none() && is_public && !is_proc_macro(cx.sess(), &item.attrs) { check_must_use_candidate( cx, &sig.decl, body, item.span, item.hir_id(), item.span.with_hi(sig.decl.output.span().hi()), "this method could have a `#[must_use]` attribute", ); } } } } } impl<'tcx> Functions { fn check_arg_number(self, cx: &LateContext<'_>, decl: &hir::FnDecl<'_>, fn_span: Span) { let args = decl.inputs.len() as u64; if args > self.threshold { span_lint( cx, TOO_MANY_ARGUMENTS, fn_span, &format!("this function has too many arguments ({}/{})", args, self.threshold), ); } } fn check_line_number(self, cx: &LateContext<'_>, span: Span, body: &'tcx hir::Body<'_>) { if in_external_macro(cx.sess(), span) { return; } let code_snippet = snippet(cx, body.value.span, ".."); let mut line_count: u64 = 0; let mut in_comment = false; let mut code_in_line; // Skip the surrounding function decl. let start_brace_idx = code_snippet.find('{').map_or(0, |i| i + 1); let end_brace_idx = code_snippet.rfind('}').unwrap_or_else(|| code_snippet.len()); let function_lines = code_snippet[start_brace_idx..end_brace_idx].lines(); for mut line in function_lines { code_in_line = false; loop { line = line.trim_start(); if line.is_empty() { break; } if in_comment { if let Some(i) = line.find("*/") { line = &line[i + 2..]; in_comment = false; continue; } } else { let multi_idx = line.find("/*").unwrap_or_else(|| line.len()); let single_idx = line.find("//").unwrap_or_else(|| line.len()); code_in_line |= multi_idx > 0 && single_idx > 0; // Implies multi_idx is below line.len() if multi_idx < single_idx { line = &line[multi_idx + 2..]; in_comment = true; continue; } } break; } if code_in_line { line_count += 1; } } if line_count > self.max_lines { span_lint( cx, TOO_MANY_LINES, span, &format!("this function has too many lines ({}/{})", line_count, self.max_lines), ) } } fn check_raw_ptr( cx: &LateContext<'tcx>, unsafety: hir::Unsafety, decl: &'tcx hir::FnDecl<'_>, body: &'tcx hir::Body<'_>, hir_id: hir::HirId, ) { let expr = &body.value; if unsafety == hir::Unsafety::Normal && cx.access_levels.is_exported(hir_id) { let raw_ptrs = iter_input_pats(decl, body) .zip(decl.inputs.iter()) .filter_map(|(arg, ty)| raw_ptr_arg(arg, ty)) .collect::<FxHashSet<_>>(); if !raw_ptrs.is_empty() { let typeck_results = cx.tcx.typeck_body(body.id()); let mut v = DerefVisitor { cx, ptrs: raw_ptrs, typeck_results, }; intravisit::walk_expr(&mut v, expr); } } } } fn check_result_unit_err(cx: &LateContext<'_>, decl: &hir::FnDecl<'_>, item_span: Span, fn_header_span: Span) { if_chain! { if !in_external_macro(cx.sess(), item_span); if let hir::FnRetTy::Return(ref ty) = decl.output; if let hir::TyKind::Path(ref qpath) = ty.kind; if is_type_diagnostic_item(cx, hir_ty_to_ty(cx.tcx, ty), sym::result_type); if let Some(ref args) = last_path_segment(qpath).args; if let [_, hir::GenericArg::Type(ref err_ty)] = args.args;
cx, RESULT_UNIT_ERR, fn_header_span, "this returns a `Result<_, ()>", None, "use a custom Error type instead", ); } } } fn check_needless_must_use( cx: &LateContext<'_>, decl: &hir::FnDecl<'_>, item_id: hir::HirId, item_span: Span, fn_header_span: Span, attr: &Attribute, ) { if in_external_macro(cx.sess(), item_span) { return; } if returns_unit(decl) { span_lint_and_then( cx, MUST_USE_UNIT, fn_header_span, "this unit-returning function has a `#[must_use]` attribute", |diag| { diag.span_suggestion( attr.span, "remove the attribute", "".into(), Applicability::MachineApplicable, ); }, ); } else if !attr.is_value_str() && is_must_use_ty(cx, return_ty(cx, item_id)) { span_lint_and_help( cx, DOUBLE_MUST_USE, fn_header_span, "this function has an empty `#[must_use]` attribute, but returns a type already marked as `#[must_use]`", None, "either add some descriptive text or remove the attribute", ); } } fn check_must_use_candidate<'tcx>( cx: &LateContext<'tcx>, decl: &'tcx hir::FnDecl<'_>, body: &'tcx hir::Body<'_>, item_span: Span, item_id: hir::HirId, fn_span: Span, msg: &str, ) { if has_mutable_arg(cx, body) || mutates_static(cx, body) || in_external_macro(cx.sess(), item_span) || returns_unit(decl) || !cx.access_levels.is_exported(item_id) || is_must_use_ty(cx, return_ty(cx, item_id)) { return; } span_lint_and_then(cx, MUST_USE_CANDIDATE, fn_span, msg, |diag| { if let Some(snippet) = snippet_opt(cx, fn_span) { diag.span_suggestion( fn_span, "add the attribute", format!("#[must_use] {}", snippet), Applicability::MachineApplicable, ); } }); } fn returns_unit(decl: &hir::FnDecl<'_>) -> bool { match decl.output { hir::FnRetTy::DefaultReturn(_) => true, hir::FnRetTy::Return(ref ty) => match ty.kind { hir::TyKind::Tup(ref tys) => tys.is_empty(), hir::TyKind::Never => true, _ => false, }, } } fn has_mutable_arg(cx: &LateContext<'_>, body: &hir::Body<'_>) -> bool { let mut tys = FxHashSet::default(); body.params.iter().any(|param| is_mutable_pat(cx, &param.pat, &mut tys)) } fn is_mutable_pat(cx: &LateContext<'_>, pat: &hir::Pat<'_>, tys: &mut FxHashSet<DefId>) -> bool { if let hir::PatKind::Wild = pat.kind { return false; // ignore `_` patterns } if cx.tcx.has_typeck_results(pat.hir_id.owner.to_def_id()) { is_mutable_ty(cx, &cx.tcx.typeck(pat.hir_id.owner).pat_ty(pat), pat.span, tys) } else { false } } static KNOWN_WRAPPER_TYS: &[&[&str]] = &[&["alloc", "rc", "Rc"], &["std", "sync", "Arc"]]; fn is_mutable_ty<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, span: Span, tys: &mut FxHashSet<DefId>) -> bool { match *ty.kind() { // primitive types are never mutable ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str => false, ty::Adt(ref adt, ref substs) => { tys.insert(adt.did) && !ty.is_freeze(cx.tcx.at(span), cx.param_env) || KNOWN_WRAPPER_TYS.iter().any(|path| match_def_path(cx, adt.did, path)) && substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys)) }, ty::Tuple(ref substs) => substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys)), ty::Array(ty, _) | ty::Slice(ty) => is_mutable_ty(cx, ty, span, tys), ty::RawPtr(ty::TypeAndMut { ty, mutbl }) | ty::Ref(_, ty, mutbl) => { mutbl == hir::Mutability::Mut || is_mutable_ty(cx, ty, span, tys) }, // calling something constitutes a side effect, so return true on all callables // also never calls need not be used, so return true for them, too _ => true, } } fn raw_ptr_arg(arg: &hir::Param<'_>, ty: &hir::Ty<'_>) -> Option<hir::HirId> { if let (&hir::PatKind::Binding(_, id, _, _), &hir::TyKind::Ptr(_)) = (&arg.pat.kind, &ty.kind) { Some(id) } 
else { None } } struct DerefVisitor<'a, 'tcx> { cx: &'a LateContext<'tcx>, ptrs: FxHashSet<hir::HirId>, typeck_results: &'a ty::TypeckResults<'tcx>, } impl<'a, 'tcx> intravisit::Visitor<'tcx> for DerefVisitor<'a, 'tcx> { type Map = Map<'tcx>; fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) { match expr.kind { hir::ExprKind::Call(ref f, args) => { let ty = self.typeck_results.expr_ty(f); if type_is_unsafe_function(self.cx, ty) { for arg in args { self.check_arg(arg); } } }, hir::ExprKind::MethodCall(_, _, args, _) => { let def_id = self.typeck_results.type_dependent_def_id(expr.hir_id).unwrap(); let base_type = self.cx.tcx.type_of(def_id); if type_is_unsafe_function(self.cx, base_type) { for arg in args { self.check_arg(arg); } } }, hir::ExprKind::Unary(hir::UnOp::Deref, ref ptr) => self.check_arg(ptr), _ => (), } intravisit::walk_expr(self, expr); } fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> { intravisit::NestedVisitorMap::None } } impl<'a, 'tcx> DerefVisitor<'a, 'tcx> { fn check_arg(&self, ptr: &hir::Expr<'_>) { if let Some(id) = path_to_local(ptr) { if self.ptrs.contains(&id) { span_lint( self.cx, NOT_UNSAFE_PTR_ARG_DEREF, ptr.span, "this public function dereferences a raw pointer but is not marked `unsafe`", ); } } } } struct StaticMutVisitor<'a, 'tcx> { cx: &'a LateContext<'tcx>, mutates_static: bool, } impl<'a, 'tcx> intravisit::Visitor<'tcx> for StaticMutVisitor<'a, 'tcx> { type Map = Map<'tcx>; fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) { use hir::ExprKind::{AddrOf, Assign, AssignOp, Call, MethodCall}; if self.mutates_static { return; } match expr.kind { Call(_, args) | MethodCall(_, _, args, _) => { let mut tys = FxHashSet::default(); for arg in args { if self.cx.tcx.has_typeck_results(arg.hir_id.owner.to_def_id()) && is_mutable_ty( self.cx, self.cx.tcx.typeck(arg.hir_id.owner).expr_ty(arg), arg.span, &mut tys, ) && is_mutated_static(arg) { self.mutates_static = true; return; } tys.clear(); } }, Assign(ref target, ..) | AssignOp(_, ref target, _) | AddrOf(_, hir::Mutability::Mut, ref target) => { self.mutates_static |= is_mutated_static(target) }, _ => {}, } } fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> { intravisit::NestedVisitorMap::None } } fn is_mutated_static(e: &hir::Expr<'_>) -> bool { use hir::ExprKind::{Field, Index, Path}; match e.kind { Path(QPath::Resolved(_, path)) => !matches!(path.res, Res::Local(_)), Path(_) => true, Field(ref inner, _) | Index(ref inner, _) => is_mutated_static(inner), _ => false, } } fn mutates_static<'tcx>(cx: &LateContext<'tcx>, body: &'tcx hir::Body<'_>) -> bool { let mut v = StaticMutVisitor { cx, mutates_static: false, }; intravisit::walk_expr(&mut v, &body.value); v.mutates_static }
if let hir::TyKind::Tup(t) = err_ty.kind; if t.is_empty(); then { span_lint_and_help(
rook.go
/* Copyright 2019 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controller import ( ctrl "sigs.k8s.io/controller-runtime" "github.com/crossplane/provider-rook/pkg/controller/database/cockroach" "github.com/crossplane/provider-rook/pkg/controller/database/yugabyte" ) // Controllers passes down config and adds individual controllers to the manager. type Controllers struct{} // SetupWithManager adds all Rook controllers to the manager. func (c *Controllers) SetupWithManager(mgr ctrl.Manager) error { controllers := []interface { SetupWithManager(ctrl.Manager) error }{ &cockroach.ClaimController{}, &cockroach.ClaimDefaultingController{}, &cockroach.ClaimSchedulingController{}, &cockroach.Controller{}, &yugabyte.ClaimController{}, &yugabyte.ClaimDefaultingController{}, &yugabyte.ClaimSchedulingController{}, &yugabyte.Controller{}, } for _, c := range controllers { if err := c.SetupWithManager(mgr); err != nil
} return nil }
{ return err }
forms.py
from django import forms from .models import Post class PostForm(forms.ModelForm):
    class Meta:
        model = Post
        exclude = ('timestamp', 'owner')
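A minimal usage sketch for the form above (not part of the original forms.py): it assumes a standard Django view in views.py, that Post.owner should be filled from the request user, and purely illustrative names such as create_post, 'post-list', and 'post_form.html'.

from django.shortcuts import redirect, render
from .forms import PostForm  # if this sketch lived in views.py

def create_post(request):
    # Hypothetical view, for illustration only; names below are not from the original project.
    if request.method == 'POST':
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)  # 'timestamp' and 'owner' are excluded from the form,
            post.owner = request.user       # so the owner is filled in from the request instead
            post.save()
            return redirect('post-list')    # illustrative URL name
    else:
        form = PostForm()
    return render(request, 'post_form.html', {'form': form})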
google-charts.js
/******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ /******/ // The require function /******/ function
(moduleId) { /******/ /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ i: moduleId, /******/ l: false, /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ /******/ // Flag the module as loaded /******/ module.l = true; /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ /******/ // define getter function for harmony exports /******/ __webpack_require__.d = function(exports, name, getter) { /******/ if(!__webpack_require__.o(exports, name)) { /******/ Object.defineProperty(exports, name, { enumerable: true, get: getter }); /******/ } /******/ }; /******/ /******/ // define __esModule on exports /******/ __webpack_require__.r = function(exports) { /******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) { /******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' }); /******/ } /******/ Object.defineProperty(exports, '__esModule', { value: true }); /******/ }; /******/ /******/ // create a fake namespace object /******/ // mode & 1: value is a module id, require it /******/ // mode & 2: merge all properties of value into the ns /******/ // mode & 4: return value when already ns object /******/ // mode & 8|1: behave like require /******/ __webpack_require__.t = function(value, mode) { /******/ if(mode & 1) value = __webpack_require__(value); /******/ if(mode & 8) return value; /******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value; /******/ var ns = Object.create(null); /******/ __webpack_require__.r(ns); /******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value }); /******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key)); /******/ return ns; /******/ }; /******/ /******/ // getDefaultExport function for compatibility with non-harmony modules /******/ __webpack_require__.n = function(module) { /******/ var getter = module && module.__esModule ? /******/ function getDefault() { return module['default']; } : /******/ function getModuleExports() { return module; }; /******/ __webpack_require__.d(getter, 'a', getter); /******/ return getter; /******/ }; /******/ /******/ // Object.prototype.hasOwnProperty.call /******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; /******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = "/"; /******/ /******/ /******/ // Load entry module and return exports /******/ return __webpack_require__(__webpack_require__.s = 140); /******/ }) /************************************************************************/ /******/ ({ /***/ "./resources/metronic/js/pages/features/charts/google-charts.js": /*!**********************************************************************!*\ !*** ./resources/metronic/js/pages/features/charts/google-charts.js ***! 
\**********************************************************************/ /*! no static exports found */ /***/ (function(module, exports, __webpack_require__) { "use strict"; eval(" // Class definition\n\nvar KTGoogleChartsDemo = function () {\n // Private functions\n var main = function main() {\n // GOOGLE CHARTS INIT\n google.load('visualization', '1', {\n packages: ['corechart', 'bar', 'line']\n });\n google.setOnLoadCallback(function () {\n KTGoogleChartsDemo.runDemos();\n });\n };\n\n var demoColumnCharts = function demoColumnCharts() {\n // COLUMN CHART\n var data = new google.visualization.DataTable();\n data.addColumn('timeofday', 'Time of Day');\n data.addColumn('number', 'Motivation Level');\n data.addColumn('number', 'Energy Level');\n data.addRows([[{\n v: [8, 0, 0],\n f: '8 am'\n }, 1, .25], [{\n v: [9, 0, 0],\n f: '9 am'\n }, 2, .5], [{\n v: [10, 0, 0],\n f: '10 am'\n }, 3, 1], [{\n v: [11, 0, 0],\n f: '11 am'\n }, 4, 2.25], [{\n v: [12, 0, 0],\n f: '12 pm'\n }, 5, 2.25], [{\n v: [13, 0, 0],\n f: '1 pm'\n }, 6, 3], [{\n v: [14, 0, 0],\n f: '2 pm'\n }, 7, 4], [{\n v: [15, 0, 0],\n f: '3 pm'\n }, 8, 5.25], [{\n v: [16, 0, 0],\n f: '4 pm'\n }, 9, 7.5], [{\n v: [17, 0, 0],\n f: '5 pm'\n }, 10, 10]]);\n var options = {\n title: 'Motivation and Energy Level Throughout the Day',\n focusTarget: 'category',\n hAxis: {\n title: 'Time of Day',\n format: 'h:mm a',\n viewWindow: {\n min: [7, 30, 0],\n max: [17, 30, 0]\n }\n },\n vAxis: {\n title: 'Rating (scale of 1-10)'\n },\n colors: ['#6e4ff5', '#fe3995']\n };\n var chart = new google.visualization.ColumnChart(document.getElementById('kt_gchart_1'));\n chart.draw(data, options);\n var chart = new google.visualization.ColumnChart(document.getElementById('kt_gchart_2'));\n chart.draw(data, options);\n };\n\n var demoPieCharts = function demoPieCharts() {\n var data = google.visualization.arrayToDataTable([['Task', 'Hours per Day'], ['Work', 11], ['Eat', 2], ['Commute', 2], ['Watch TV', 2], ['Sleep', 7]]);\n var options = {\n title: 'My Daily Activities',\n colors: ['#fe3995', '#f6aa33', '#6e4ff5', '#2abe81', '#c7d2e7', '#593ae1']\n };\n var chart = new google.visualization.PieChart(document.getElementById('kt_gchart_3'));\n chart.draw(data, options);\n var options = {\n pieHole: 0.4,\n colors: ['#fe3995', '#f6aa33', '#6e4ff5', '#2abe81', '#c7d2e7', '#593ae1']\n };\n var chart = new google.visualization.PieChart(document.getElementById('kt_gchart_4'));\n chart.draw(data, options);\n };\n\n var demoLineCharts = function demoLineCharts() {\n // LINE CHART\n var data = new google.visualization.DataTable();\n data.addColumn('number', 'Day');\n data.addColumn('number', 'Guardians of the Galaxy');\n data.addColumn('number', 'The Avengers');\n data.addColumn('number', 'Transformers: Age of Extinction');\n data.addRows([[1, 37.8, 80.8, 41.8], [2, 30.9, 69.5, 32.4], [3, 25.4, 57, 25.7], [4, 11.7, 18.8, 10.5], [5, 11.9, 17.6, 10.4], [6, 8.8, 13.6, 7.7], [7, 7.6, 12.3, 9.6], [8, 12.3, 29.2, 10.6], [9, 16.9, 42.9, 14.8], [10, 12.8, 30.9, 11.6], [11, 5.3, 7.9, 4.7], [12, 6.6, 8.4, 5.2], [13, 4.8, 6.3, 3.6], [14, 4.2, 6.2, 3.4]]);\n var options = {\n chart: {\n title: 'Box Office Earnings in First Two Weeks of Opening',\n subtitle: 'in millions of dollars (USD)'\n },\n colors: ['#6e4ff5', '#f6aa33', '#fe3995']\n };\n var chart = new google.charts.Line(document.getElementById('kt_gchart_5'));\n chart.draw(data, options);\n };\n\n return {\n // public functions\n init: function init() {\n main();\n },\n runDemos: function runDemos() {\n 
demoColumnCharts();\n demoLineCharts();\n demoPieCharts();\n }\n };\n}();\n\nKTGoogleChartsDemo.init();//# sourceURL=[module]\n//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"sources":["webpack:///./resources/metronic/js/pages/features/charts/google-charts.js?4e0f"],"names":["KTGoogleChartsDemo","main","google","load","packages","setOnLoadCallback","runDemos","demoColumnCharts","data","visualization","DataTable","addColumn","addRows","v","f","options","title","focusTarget","hAxis","format","viewWindow","min","max","vAxis","colors","chart","ColumnChart","document","getElementById","draw","demoPieCharts","arrayToDataTable","PieChart","pieHole","demoLineCharts","subtitle","charts","Line","init"],"mappings":"CACA;;AACA,IAAIA,kBAAkB,GAAG,YAAW;AAEhC;AAEA,MAAIC,IAAI,GAAG,SAAPA,IAAO,GAAW;AAClB;AACAC,UAAM,CAACC,IAAP,CAAY,eAAZ,EAA6B,GAA7B,EAAkC;AAC9BC,cAAQ,EAAE,CAAC,WAAD,EAAc,KAAd,EAAqB,MAArB;AADoB,KAAlC;AAIAF,UAAM,CAACG,iBAAP,CAAyB,YAAW;AAChCL,wBAAkB,CAACM,QAAnB;AACH,KAFD;AAGH,GATD;;AAWA,MAAIC,gBAAgB,GAAG,SAAnBA,gBAAmB,GAAW;AAC9B;AACA,QAAIC,IAAI,GAAG,IAAIN,MAAM,CAACO,aAAP,CAAqBC,SAAzB,EAAX;AACAF,QAAI,CAACG,SAAL,CAAe,WAAf,EAA4B,aAA5B;AACAH,QAAI,CAACG,SAAL,CAAe,QAAf,EAAyB,kBAAzB;AACAH,QAAI,CAACG,SAAL,CAAe,QAAf,EAAyB,cAAzB;AAEAH,QAAI,CAACI,OAAL,CAAa,CACT,CAAC;AACGC,OAAC,EAAE,CAAC,CAAD,EAAI,CAAJ,EAAO,CAAP,CADN;AAEGC,OAAC,EAAE;AAFN,KAAD,EAGG,CAHH,EAGM,GAHN,CADS,EAKT,CAAC;AACGD,OAAC,EAAE,CAAC,CAAD,EAAI,CAAJ,EAAO,CAAP,CADN;AAEGC,OAAC,EAAE;AAFN,KAAD,EAGG,CAHH,EAGM,EAHN,CALS,EAST,CAAC;AACGD,OAAC,EAAE,CAAC,EAAD,EAAK,CAAL,EAAQ,CAAR,CADN;AAEGC,OAAC,EAAE;AAFN,KAAD,EAGG,CAHH,EAGM,CAHN,CATS,EAaT,CAAC;AACGD,OAAC,EAAE,CAAC,EAAD,EAAK,CAAL,EAAQ,CAAR,CADN;AAEGC,OAAC,EAAE;AAFN,KAAD,EAGG,CAHH,EAGM,IAHN,CAbS,EAiBT,CAAC;AACGD,OAAC,EAAE,CAAC,EAAD,EAAK,CAAL,EAAQ,CAAR,CADN;AAEGC,OAAC,EAAE;AAFN,KAAD,EAGG,CAHH,EAGM,IAHN,CAjBS,EAqBT,CAAC;AACGD,OAAC,EAAE,CAAC,EAAD,EAAK,CAAL,EAAQ,CAAR,CADN;AAEGC,OAAC,EAAE;AAFN,KAAD,EAGG,CAHH,EAGM,CAHN,CArBS,EAyBT,CAAC;AACGD,OAAC,EAAE,CAAC,EAAD,EAAK,CAAL,EAAQ,CAAR,CADN;AAEGC,OAAC,EAAE;AAFN,KAAD,EAGG,CAHH,EAGM,CAHN,CAzBS,EA6BT,CAAC;AACGD,OAAC,EAAE,CAAC,EAAD,EAAK,CAAL,EAAQ,CAAR,CADN;AAEGC,OAAC,EAAE;AAFN,KAAD,EAGG,CAHH,EAGM,IAHN,CA7BS,EAiCT,CAAC;AACGD,OAAC,EAAE,CAAC,EAAD,EAAK,CAAL,EAAQ,CAAR,CADN;AAEGC,OAAC,EAAE;AAFN,KAAD,EAGG,CAHH,EAGM,GAHN,CAjCS,EAqCT,CAAC;AACGD,OAAC,EAAE,CAAC,EAAD,EAAK,CAAL,EAAQ,CAAR,CADN;AAEGC,OAAC,EAAE;AAFN,KAAD,EAGG,EAHH,EAGO,EAHP,CArCS,CAAb;AA2CA,QAAIC,OAAO,GAAG;AACVC,WAAK,EAAE,gDADG;AAEVC,iBAAW,EAAE,UAFH;AAGVC,WAAK,EAAE;AACHF,aAAK,EAAE,aADJ;AAEHG,cAAM,EAAE,QAFL;AAGHC,kBAAU,EAAE;AACRC,aAAG,EAAE,CAAC,CAAD,EAAI,EAAJ,EAAQ,CAAR,CADG;AAERC,aAAG,EAAE,CAAC,EAAD,EAAK,EAAL,EAAS,CAAT;AAFG;AAHT,OAHG;AAWVC,WAAK,EAAE;AACHP,aAAK,EAAE;AADJ,OAXG;AAcVQ,YAAM,EAAE,CAAC,SAAD,EAAY,SAAZ;AAdE,KAAd;AAiBA,QAAIC,KAAK,GAAG,IAAIvB,MAAM,CAACO,aAAP,CAAqBiB,WAAzB,CAAqCC,QAAQ,CAACC,cAAT,CAAwB,aAAxB,CAArC,CAAZ;AACAH,SAAK,CAACI,IAAN,CAAWrB,IAAX,EAAiBO,OAAjB;AAEA,QAAIU,KAAK,GAAG,IAAIvB,MAAM,CAACO,aAAP,CAAqBiB,WAAzB,CAAqCC,QAAQ,CAACC,cAAT,CAAwB,aAAxB,CAArC,CAAZ;AACAH,SAAK,CAACI,IAAN,CAAWrB,IAAX,EAAiBO,OAAjB;AACH,GAxED;;AA0EA,MAAIe,aAAa,GAAG,SAAhBA,aAAgB,GAAW;AAC3B,QAAItB,IAAI,GAAGN,MAAM,CAACO,aAAP,CAAqBsB,gBAArB,CAAsC,CAC7C,CAAC,MAAD,EAAS,eAAT,CAD6C,EAE7C,CAAC,MAAD,EAAS,EAAT,CAF6C,EAG7C,CAAC,KAAD,EAAQ,CAAR,CAH6C,EAI7C,CAAC,SAAD,EAAY,CAAZ,CAJ6C,EAK7C,CAAC,UAAD,EAAa,CAAb,CAL6C,EAM7C,CAAC,OAAD,EAAU,CAAV,CAN6C,CAAtC,CAAX;AASA,QAAIhB,OAAO,GAAG;AACVC,WAAK,EAAE,qBADG;AAEVQ,YAAM,EAAE,CAAC,SAAD,EAAY,SAAZ,EAAuB,SAAvB,EAAkC,SAAlC,EAA6C,SAA7C,EAAwD,SAAxD;AAFE,KAAd;AAKA,QAAIC,KAAK,GAAG,IAAIvB,MAAM
,CAACO,aAAP,CAAqBuB,QAAzB,CAAkCL,QAAQ,CAACC,cAAT,CAAwB,aAAxB,CAAlC,CAAZ;AACAH,SAAK,CAACI,IAAN,CAAWrB,IAAX,EAAiBO,OAAjB;AAEA,QAAIA,OAAO,GAAG;AACVkB,aAAO,EAAE,GADC;AAEVT,YAAM,EAAE,CAAC,SAAD,EAAY,SAAZ,EAAuB,SAAvB,EAAkC,SAAlC,EAA6C,SAA7C,EAAwD,SAAxD;AAFE,KAAd;AAKA,QAAIC,KAAK,GAAG,IAAIvB,MAAM,CAACO,aAAP,CAAqBuB,QAAzB,CAAkCL,QAAQ,CAACC,cAAT,CAAwB,aAAxB,CAAlC,CAAZ;AACAH,SAAK,CAACI,IAAN,CAAWrB,IAAX,EAAiBO,OAAjB;AACH,GAzBD;;AA2BA,MAAImB,cAAc,GAAG,SAAjBA,cAAiB,GAAW;AAC5B;AACA,QAAI1B,IAAI,GAAG,IAAIN,MAAM,CAACO,aAAP,CAAqBC,SAAzB,EAAX;AACAF,QAAI,CAACG,SAAL,CAAe,QAAf,EAAyB,KAAzB;AACAH,QAAI,CAACG,SAAL,CAAe,QAAf,EAAyB,yBAAzB;AACAH,QAAI,CAACG,SAAL,CAAe,QAAf,EAAyB,cAAzB;AACAH,QAAI,CAACG,SAAL,CAAe,QAAf,EAAyB,iCAAzB;AAEAH,QAAI,CAACI,OAAL,CAAa,CACT,CAAC,CAAD,EAAI,IAAJ,EAAU,IAAV,EAAgB,IAAhB,CADS,EAET,CAAC,CAAD,EAAI,IAAJ,EAAU,IAAV,EAAgB,IAAhB,CAFS,EAGT,CAAC,CAAD,EAAI,IAAJ,EAAU,EAAV,EAAc,IAAd,CAHS,EAIT,CAAC,CAAD,EAAI,IAAJ,EAAU,IAAV,EAAgB,IAAhB,CAJS,EAKT,CAAC,CAAD,EAAI,IAAJ,EAAU,IAAV,EAAgB,IAAhB,CALS,EAMT,CAAC,CAAD,EAAI,GAAJ,EAAS,IAAT,EAAe,GAAf,CANS,EAOT,CAAC,CAAD,EAAI,GAAJ,EAAS,IAAT,EAAe,GAAf,CAPS,EAQT,CAAC,CAAD,EAAI,IAAJ,EAAU,IAAV,EAAgB,IAAhB,CARS,EAST,CAAC,CAAD,EAAI,IAAJ,EAAU,IAAV,EAAgB,IAAhB,CATS,EAUT,CAAC,EAAD,EAAK,IAAL,EAAW,IAAX,EAAiB,IAAjB,CAVS,EAWT,CAAC,EAAD,EAAK,GAAL,EAAU,GAAV,EAAe,GAAf,CAXS,EAYT,CAAC,EAAD,EAAK,GAAL,EAAU,GAAV,EAAe,GAAf,CAZS,EAaT,CAAC,EAAD,EAAK,GAAL,EAAU,GAAV,EAAe,GAAf,CAbS,EAcT,CAAC,EAAD,EAAK,GAAL,EAAU,GAAV,EAAe,GAAf,CAdS,CAAb;AAiBA,QAAIG,OAAO,GAAG;AACVU,WAAK,EAAE;AACHT,aAAK,EAAE,mDADJ;AAEHmB,gBAAQ,EAAE;AAFP,OADG;AAKVX,YAAM,EAAE,CAAC,SAAD,EAAY,SAAZ,EAAuB,SAAvB;AALE,KAAd;AAQA,QAAIC,KAAK,GAAG,IAAIvB,MAAM,CAACkC,MAAP,CAAcC,IAAlB,CAAuBV,QAAQ,CAACC,cAAT,CAAwB,aAAxB,CAAvB,CAAZ;AACAH,SAAK,CAACI,IAAN,CAAWrB,IAAX,EAAiBO,OAAjB;AACH,GAnCD;;AAqCA,SAAO;AACH;AACAuB,QAAI,EAAE,gBAAW;AACbrC,UAAI;AACP,KAJE;AAMHK,YAAQ,EAAE,oBAAW;AACjBC,sBAAgB;AAChB2B,oBAAc;AACdJ,mBAAa;AAChB;AAVE,GAAP;AAYH,CArKwB,EAAzB;;AAuKA9B,kBAAkB,CAACsC,IAAnB","file":"./resources/metronic/js/pages/features/charts/google-charts.js.js","sourcesContent":["\"use strict\";\r\n// Class definition\r\nvar KTGoogleChartsDemo = function() {\r\n\r\n    // Private functions\r\n\r\n    var main = function() {\r\n        // GOOGLE CHARTS INIT\r\n        google.load('visualization', '1', {\r\n            packages: ['corechart', 'bar', 'line']\r\n        });\r\n\r\n        google.setOnLoadCallback(function() {\r\n            KTGoogleChartsDemo.runDemos();\r\n        });\r\n    }\r\n\r\n    var demoColumnCharts = function() {\r\n        // COLUMN CHART\r\n        var data = new google.visualization.DataTable();\r\n        data.addColumn('timeofday', 'Time of Day');\r\n        data.addColumn('number', 'Motivation Level');\r\n        data.addColumn('number', 'Energy Level');\r\n\r\n        data.addRows([\r\n            [{\r\n                v: [8, 0, 0],\r\n                f: '8 am'\r\n            }, 1, .25],\r\n            [{\r\n                v: [9, 0, 0],\r\n                f: '9 am'\r\n            }, 2, .5],\r\n            [{\r\n                v: [10, 0, 0],\r\n                f: '10 am'\r\n            }, 3, 1],\r\n            [{\r\n                v: [11, 0, 0],\r\n                f: '11 am'\r\n            }, 4, 2.25],\r\n            [{\r\n                v: [12, 0, 0],\r\n                f: '12 pm'\r\n            }, 5, 2.25],\r\n            [{\r\n                v: [13, 0, 0],\r\n                f: '1 pm'\r\n            }, 6, 3],\r\n            [{\r\n                v: [14, 0, 0],\r\n                f: '2 pm'\r\n            }, 7, 4],\r\n           
 [{\r\n                v: [15, 0, 0],\r\n                f: '3 pm'\r\n            }, 8, 5.25],\r\n            [{\r\n                v: [16, 0, 0],\r\n                f: '4 pm'\r\n            }, 9, 7.5],\r\n            [{\r\n                v: [17, 0, 0],\r\n                f: '5 pm'\r\n            }, 10, 10],\r\n        ]);\r\n\r\n        var options = {\r\n            title: 'Motivation and Energy Level Throughout the Day',\r\n            focusTarget: 'category',\r\n            hAxis: {\r\n                title: 'Time of Day',\r\n                format: 'h:mm a',\r\n                viewWindow: {\r\n                    min: [7, 30, 0],\r\n                    max: [17, 30, 0]\r\n                },\r\n            },\r\n            vAxis: {\r\n                title: 'Rating (scale of 1-10)'\r\n            },\r\n            colors: ['#6e4ff5', '#fe3995']\r\n        };\r\n\r\n        var chart = new google.visualization.ColumnChart(document.getElementById('kt_gchart_1'));\r\n        chart.draw(data, options);\r\n\r\n        var chart = new google.visualization.ColumnChart(document.getElementById('kt_gchart_2'));\r\n        chart.draw(data, options);\r\n    }\r\n\r\n    var demoPieCharts = function() {\r\n        var data = google.visualization.arrayToDataTable([\r\n            ['Task', 'Hours per Day'],\r\n            ['Work', 11],\r\n            ['Eat', 2],\r\n            ['Commute', 2],\r\n            ['Watch TV', 2],\r\n            ['Sleep', 7]\r\n        ]);\r\n\r\n        var options = {\r\n            title: 'My Daily Activities',\r\n            colors: ['#fe3995', '#f6aa33', '#6e4ff5', '#2abe81', '#c7d2e7', '#593ae1']\r\n        };\r\n\r\n        var chart = new google.visualization.PieChart(document.getElementById('kt_gchart_3'));\r\n        chart.draw(data, options);\r\n\r\n        var options = {\r\n            pieHole: 0.4,\r\n            colors: ['#fe3995', '#f6aa33', '#6e4ff5', '#2abe81', '#c7d2e7', '#593ae1']\r\n        };\r\n\r\n        var chart = new google.visualization.PieChart(document.getElementById('kt_gchart_4'));\r\n        chart.draw(data, options);\r\n    }    \r\n\r\n    var demoLineCharts = function() {\r\n        // LINE CHART\r\n        var data = new google.visualization.DataTable();\r\n        data.addColumn('number', 'Day');\r\n        data.addColumn('number', 'Guardians of the Galaxy');\r\n        data.addColumn('number', 'The Avengers');\r\n        data.addColumn('number', 'Transformers: Age of Extinction');\r\n\r\n        data.addRows([\r\n            [1, 37.8, 80.8, 41.8],\r\n            [2, 30.9, 69.5, 32.4],\r\n            [3, 25.4, 57, 25.7],\r\n            [4, 11.7, 18.8, 10.5],\r\n            [5, 11.9, 17.6, 10.4],\r\n            [6, 8.8, 13.6, 7.7],\r\n            [7, 7.6, 12.3, 9.6],\r\n            [8, 12.3, 29.2, 10.6],\r\n            [9, 16.9, 42.9, 14.8],\r\n            [10, 12.8, 30.9, 11.6],\r\n            [11, 5.3, 7.9, 4.7],\r\n            [12, 6.6, 8.4, 5.2],\r\n            [13, 4.8, 6.3, 3.6],\r\n            [14, 4.2, 6.2, 3.4]\r\n        ]);\r\n\r\n        var options = {\r\n            chart: {\r\n                title: 'Box Office Earnings in First Two Weeks of Opening',\r\n                subtitle: 'in millions of dollars (USD)'\r\n            },\r\n            colors: ['#6e4ff5', '#f6aa33', '#fe3995']\r\n        };\r\n\r\n        var chart = new google.charts.Line(document.getElementById('kt_gchart_5'));\r\n        chart.draw(data, options);\r\n    }\r\n\r\n    return {\r\n        // public functions\r\n        init: function() {\r\n     
       main();\r\n        },\r\n\r\n        runDemos: function() {\r\n            demoColumnCharts();\r\n            demoLineCharts();\r\n            demoPieCharts();\r\n        }\r\n    };\r\n}();\r\n\r\nKTGoogleChartsDemo.init();"],"sourceRoot":""}\n//# sourceURL=webpack-internal:///./resources/metronic/js/pages/features/charts/google-charts.js\n"); /***/ }), /***/ 140: /*!****************************************************************************!*\ !*** multi ./resources/metronic/js/pages/features/charts/google-charts.js ***! \****************************************************************************/ /*! no static exports found */ /***/ (function(module, exports, __webpack_require__) { module.exports = __webpack_require__(/*! C:\wamp64\www\keenthemes\themes\metronic\theme\html_laravel\demo1\skeleton\resources\metronic\js\pages\features\charts\google-charts.js */"./resources/metronic/js/pages/features/charts/google-charts.js"); /***/ }) /******/ });
__webpack_require__
clusters.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.23.0 // protoc v3.14.0 // source: envoy/admin/v3/clusters.proto package envoy_admin_v3 import ( v31 "github.com/cilium/proxy/go/envoy/config/cluster/v3" v32 "github.com/cilium/proxy/go/envoy/config/core/v3" v3 "github.com/cilium/proxy/go/envoy/type/v3" _ "github.com/cncf/xds/go/udpa/annotations" proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This is a compile-time assertion that a sufficiently up-to-date version // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 // Admin endpoint uses this wrapper for `/clusters` to display cluster status information. // See :ref:`/clusters <operations_admin_interface_clusters>` for more information. type Clusters struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Mapping from cluster name to each cluster's status. ClusterStatuses []*ClusterStatus `protobuf:"bytes,1,rep,name=cluster_statuses,json=clusterStatuses,proto3" json:"cluster_statuses,omitempty"` } func (x *Clusters) Reset() { *x = Clusters{} if protoimpl.UnsafeEnabled { mi := &file_envoy_admin_v3_clusters_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Clusters) String() string { return protoimpl.X.MessageStringOf(x) } func (*Clusters) ProtoMessage() {} func (x *Clusters) ProtoReflect() protoreflect.Message { mi := &file_envoy_admin_v3_clusters_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Clusters.ProtoReflect.Descriptor instead. func (*Clusters) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{0} } func (x *Clusters) GetClusterStatuses() []*ClusterStatus { if x != nil { return x.ClusterStatuses } return nil } // Details an individual cluster's current status. // [#next-free-field: 8] type ClusterStatus struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Name of the cluster. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Denotes whether this cluster was added via API or configured statically. AddedViaApi bool `protobuf:"varint,2,opt,name=added_via_api,json=addedViaApi,proto3" json:"added_via_api,omitempty"` // The success rate threshold used in the last interval. // If // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` // is *false*, all errors: externally and locally generated were used to calculate the threshold. // If // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` // is *true*, only externally generated errors were used to calculate the threshold. // The threshold is used to eject hosts based on their success rate. 
See // :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for details. // // Note: this field may be omitted in any of the three following cases: // // 1. There were not enough hosts with enough request volume to proceed with success rate based // outlier ejection. // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. SuccessRateEjectionThreshold *v3.Percent `protobuf:"bytes,3,opt,name=success_rate_ejection_threshold,json=successRateEjectionThreshold,proto3" json:"success_rate_ejection_threshold,omitempty"` // Mapping from host address to the host's current status. HostStatuses []*HostStatus `protobuf:"bytes,4,rep,name=host_statuses,json=hostStatuses,proto3" json:"host_statuses,omitempty"` // The success rate threshold used in the last interval when only locally originated failures were // taken into account and externally originated errors were treated as success. // This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` // is *true*. The threshold is used to eject hosts based on their success rate. // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for // details. // // Note: this field may be omitted in any of the three following cases: // // 1. There were not enough hosts with enough request volume to proceed with success rate based // outlier ejection. // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. LocalOriginSuccessRateEjectionThreshold *v3.Percent `protobuf:"bytes,5,opt,name=local_origin_success_rate_ejection_threshold,json=localOriginSuccessRateEjectionThreshold,proto3" json:"local_origin_success_rate_ejection_threshold,omitempty"` // :ref:`Circuit breaking <arch_overview_circuit_break>` settings of the cluster. CircuitBreakers *v31.CircuitBreakers `protobuf:"bytes,6,opt,name=circuit_breakers,json=circuitBreakers,proto3" json:"circuit_breakers,omitempty"` // Observability name of the cluster. ObservabilityName string `protobuf:"bytes,7,opt,name=observability_name,json=observabilityName,proto3" json:"observability_name,omitempty"` } func (x *ClusterStatus) Reset() { *x = ClusterStatus{} if protoimpl.UnsafeEnabled { mi := &file_envoy_admin_v3_clusters_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ClusterStatus) String() string { return protoimpl.X.MessageStringOf(x) } func (*ClusterStatus) ProtoMessage() {} func (x *ClusterStatus) ProtoReflect() protoreflect.Message { mi := &file_envoy_admin_v3_clusters_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ClusterStatus.ProtoReflect.Descriptor instead. 
func (*ClusterStatus) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{1} } func (x *ClusterStatus) GetName() string { if x != nil { return x.Name } return "" } func (x *ClusterStatus) GetAddedViaApi() bool { if x != nil { return x.AddedViaApi } return false } func (x *ClusterStatus) GetSuccessRateEjectionThreshold() *v3.Percent { if x != nil { return x.SuccessRateEjectionThreshold } return nil } func (x *ClusterStatus) GetHostStatuses() []*HostStatus { if x != nil { return x.HostStatuses } return nil } func (x *ClusterStatus) GetLocalOriginSuccessRateEjectionThreshold() *v3.Percent { if x != nil { return x.LocalOriginSuccessRateEjectionThreshold } return nil } func (x *ClusterStatus) GetCircuitBreakers() *v31.CircuitBreakers { if x != nil { return x.CircuitBreakers } return nil } func (x *ClusterStatus) GetObservabilityName() string { if x != nil { return x.ObservabilityName } return "" } // Current state of a particular host. // [#next-free-field: 10] type HostStatus struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Address of this host. Address *v32.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // List of stats specific to this host. Stats []*SimpleMetric `protobuf:"bytes,2,rep,name=stats,proto3" json:"stats,omitempty"` // The host's current health status. HealthStatus *HostHealthStatus `protobuf:"bytes,3,opt,name=health_status,json=healthStatus,proto3" json:"health_status,omitempty"` // Request success rate for this host over the last calculated interval. // If // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` // is *false*, all errors: externally and locally generated were used in success rate // calculation. If // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` // is *true*, only externally generated errors were used in success rate calculation. // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for // details. // // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. SuccessRate *v3.Percent `protobuf:"bytes,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // The host's weight. If not configured, the value defaults to 1. Weight uint32 `protobuf:"varint,5,opt,name=weight,proto3" json:"weight,omitempty"` // The hostname of the host, if applicable. Hostname string `protobuf:"bytes,6,opt,name=hostname,proto3" json:"hostname,omitempty"` // The host's priority. If not configured, the value defaults to 0 (highest priority). Priority uint32 `protobuf:"varint,7,opt,name=priority,proto3" json:"priority,omitempty"` // Request success rate for this host over the last calculated // interval when only locally originated errors are taken into account and externally originated // errors were treated as success. // This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_config.cluster.v3.OutlierDetection.split_external_local_origin_errors>` // is *true*. // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for // details. 
// // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. LocalOriginSuccessRate *v3.Percent `protobuf:"bytes,8,opt,name=local_origin_success_rate,json=localOriginSuccessRate,proto3" json:"local_origin_success_rate,omitempty"` // locality of the host. Locality *v32.Locality `protobuf:"bytes,9,opt,name=locality,proto3" json:"locality,omitempty"` } func (x *HostStatus) Reset() { *x = HostStatus{} if protoimpl.UnsafeEnabled { mi := &file_envoy_admin_v3_clusters_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *HostStatus) String() string { return protoimpl.X.MessageStringOf(x) } func (*HostStatus) ProtoMessage() {} func (x *HostStatus) ProtoReflect() protoreflect.Message { mi := &file_envoy_admin_v3_clusters_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use HostStatus.ProtoReflect.Descriptor instead. func (*HostStatus) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{2} } func (x *HostStatus) GetAddress() *v32.Address { if x != nil { return x.Address } return nil } func (x *HostStatus) GetStats() []*SimpleMetric { if x != nil { return x.Stats } return nil } func (x *HostStatus) GetHealthStatus() *HostHealthStatus { if x != nil { return x.HealthStatus } return nil } func (x *HostStatus) GetSuccessRate() *v3.Percent { if x != nil { return x.SuccessRate } return nil } func (x *HostStatus) GetWeight() uint32 { if x != nil { return x.Weight } return 0 } func (x *HostStatus) GetHostname() string { if x != nil { return x.Hostname } return "" } func (x *HostStatus) GetPriority() uint32 { if x != nil { return x.Priority } return 0 } func (x *HostStatus) GetLocalOriginSuccessRate() *v3.Percent { if x != nil { return x.LocalOriginSuccessRate } return nil } func (x *HostStatus) GetLocality() *v32.Locality { if x != nil { return x.Locality } return nil } // Health status for a host. // [#next-free-field: 9] type HostHealthStatus struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The host is currently failing active health checks. FailedActiveHealthCheck bool `protobuf:"varint,1,opt,name=failed_active_health_check,json=failedActiveHealthCheck,proto3" json:"failed_active_health_check,omitempty"` // The host is currently considered an outlier and has been ejected. FailedOutlierCheck bool `protobuf:"varint,2,opt,name=failed_outlier_check,json=failedOutlierCheck,proto3" json:"failed_outlier_check,omitempty"` // The host is currently being marked as degraded through active health checking. FailedActiveDegradedCheck bool `protobuf:"varint,4,opt,name=failed_active_degraded_check,json=failedActiveDegradedCheck,proto3" json:"failed_active_degraded_check,omitempty"` // The host has been removed from service discovery, but is being stabilized due to active // health checking. PendingDynamicRemoval bool `protobuf:"varint,5,opt,name=pending_dynamic_removal,json=pendingDynamicRemoval,proto3" json:"pending_dynamic_removal,omitempty"` // The host has not yet been health checked. 
PendingActiveHc bool `protobuf:"varint,6,opt,name=pending_active_hc,json=pendingActiveHc,proto3" json:"pending_active_hc,omitempty"` // The host should be excluded from panic, spillover, etc. calculations because it was explicitly // taken out of rotation via protocol signal and is not meant to be routed to. ExcludedViaImmediateHcFail bool `protobuf:"varint,7,opt,name=excluded_via_immediate_hc_fail,json=excludedViaImmediateHcFail,proto3" json:"excluded_via_immediate_hc_fail,omitempty"` // The host failed active HC due to timeout. ActiveHcTimeout bool `protobuf:"varint,8,opt,name=active_hc_timeout,json=activeHcTimeout,proto3" json:"active_hc_timeout,omitempty"` // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported // here. // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] EdsHealthStatus v32.HealthStatus `protobuf:"varint,3,opt,name=eds_health_status,json=edsHealthStatus,proto3,enum=envoy.config.core.v3.HealthStatus" json:"eds_health_status,omitempty"` } func (x *HostHealthStatus) Reset() { *x = HostHealthStatus{} if protoimpl.UnsafeEnabled { mi := &file_envoy_admin_v3_clusters_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *HostHealthStatus) String() string { return protoimpl.X.MessageStringOf(x) } func (*HostHealthStatus) ProtoMessage() {} func (x *HostHealthStatus) ProtoReflect() protoreflect.Message { mi := &file_envoy_admin_v3_clusters_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use HostHealthStatus.ProtoReflect.Descriptor instead. func (*HostHealthStatus) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{3} } func (x *HostHealthStatus) GetFailedActiveHealthCheck() bool { if x != nil { return x.FailedActiveHealthCheck } return false } func (x *HostHealthStatus) GetFailedOutlierCheck() bool { if x != nil { return x.FailedOutlierCheck } return false } func (x *HostHealthStatus) GetFailedActiveDegradedCheck() bool { if x != nil { return x.FailedActiveDegradedCheck } return false } func (x *HostHealthStatus) GetPendingDynamicRemoval() bool { if x != nil { return x.PendingDynamicRemoval } return false } func (x *HostHealthStatus) GetPendingActiveHc() bool { if x != nil { return x.PendingActiveHc } return false } func (x *HostHealthStatus) GetExcludedViaImmediateHcFail() bool { if x != nil { return x.ExcludedViaImmediateHcFail } return false } func (x *HostHealthStatus) GetActiveHcTimeout() bool { if x != nil { return x.ActiveHcTimeout } return false } func (x *HostHealthStatus) GetEdsHealthStatus() v32.HealthStatus { if x != nil { return x.EdsHealthStatus } return v32.HealthStatus_UNKNOWN } var File_envoy_admin_v3_clusters_proto protoreflect.FileDescriptor var file_envoy_admin_v3_clusters_proto_rawDesc = []byte{ 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x08, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x22, 0x8c, 0x04, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x61, 0x64, 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x61, 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x65, 0x64, 0x56, 0x69, 0x61, 0x41, 0x70, 0x69, 0x12, 0x5d, 0x0a, 0x1f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x1c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x3f, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 
0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x75, 0x0a, 0x2c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x27, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x10, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x0f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x81, 0x04, 0x0a, 0x0a, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x51, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x93, 0x04, 0x0a, 0x10, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x1c, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x63, 0x12, 0x42, 0x0a, 0x1e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x56, 0x69, 0x61, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x48, 0x63, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x61, 
0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4e, 0x0a, 0x11, 0x65, 0x64, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x65, 0x64, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x37, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_envoy_admin_v3_clusters_proto_rawDescOnce sync.Once file_envoy_admin_v3_clusters_proto_rawDescData = file_envoy_admin_v3_clusters_proto_rawDesc ) func file_envoy_admin_v3_clusters_proto_rawDescGZIP() []byte
var file_envoy_admin_v3_clusters_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_envoy_admin_v3_clusters_proto_goTypes = []interface{}{ (*Clusters)(nil), // 0: envoy.admin.v3.Clusters (*ClusterStatus)(nil), // 1: envoy.admin.v3.ClusterStatus (*HostStatus)(nil), // 2: envoy.admin.v3.HostStatus (*HostHealthStatus)(nil), // 3: envoy.admin.v3.HostHealthStatus (*v3.Percent)(nil), // 4: envoy.type.v3.Percent (*v31.CircuitBreakers)(nil), // 5: envoy.config.cluster.v3.CircuitBreakers (*v32.Address)(nil), // 6: envoy.config.core.v3.Address (*SimpleMetric)(nil), // 7: envoy.admin.v3.SimpleMetric (*v32.Locality)(nil), // 8: envoy.config.core.v3.Locality (v32.HealthStatus)(0), // 9: envoy.config.core.v3.HealthStatus } var file_envoy_admin_v3_clusters_proto_depIdxs = []int32{ 1, // 0: envoy.admin.v3.Clusters.cluster_statuses:type_name -> envoy.admin.v3.ClusterStatus 4, // 1: envoy.admin.v3.ClusterStatus.success_rate_ejection_threshold:type_name -> envoy.type.v3.Percent 2, // 2: envoy.admin.v3.ClusterStatus.host_statuses:type_name -> envoy.admin.v3.HostStatus 4, // 3: envoy.admin.v3.ClusterStatus.local_origin_success_rate_ejection_threshold:type_name -> envoy.type.v3.Percent 5, // 4: envoy.admin.v3.ClusterStatus.circuit_breakers:type_name -> envoy.config.cluster.v3.CircuitBreakers 6, // 5: envoy.admin.v3.HostStatus.address:type_name -> envoy.config.core.v3.Address 7, // 6: envoy.admin.v3.HostStatus.stats:type_name -> envoy.admin.v3.SimpleMetric 3, // 7: envoy.admin.v3.HostStatus.health_status:type_name -> envoy.admin.v3.HostHealthStatus 4, // 8: envoy.admin.v3.HostStatus.success_rate:type_name -> envoy.type.v3.Percent 4, // 9: envoy.admin.v3.HostStatus.local_origin_success_rate:type_name -> envoy.type.v3.Percent 8, // 10: envoy.admin.v3.HostStatus.locality:type_name -> envoy.config.core.v3.Locality 9, // 11: envoy.admin.v3.HostHealthStatus.eds_health_status:type_name -> envoy.config.core.v3.HealthStatus 12, // [12:12] is the sub-list for method output_type 12, // [12:12] is the sub-list for method input_type 12, // [12:12] is the sub-list for extension type_name 12, // [12:12] is the sub-list for extension extendee 0, // [0:12] is the sub-list for field type_name } func init() { file_envoy_admin_v3_clusters_proto_init() } func file_envoy_admin_v3_clusters_proto_init() { if File_envoy_admin_v3_clusters_proto != nil { return } file_envoy_admin_v3_metrics_proto_init() if !protoimpl.UnsafeEnabled { file_envoy_admin_v3_clusters_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Clusters); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_admin_v3_clusters_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ClusterStatus); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_admin_v3_clusters_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HostStatus); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_admin_v3_clusters_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HostHealthStatus); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: 
file_envoy_admin_v3_clusters_proto_rawDesc, NumEnums: 0, NumMessages: 4, NumExtensions: 0, NumServices: 0, }, GoTypes: file_envoy_admin_v3_clusters_proto_goTypes, DependencyIndexes: file_envoy_admin_v3_clusters_proto_depIdxs, MessageInfos: file_envoy_admin_v3_clusters_proto_msgTypes, }.Build() File_envoy_admin_v3_clusters_proto = out.File file_envoy_admin_v3_clusters_proto_rawDesc = nil file_envoy_admin_v3_clusters_proto_goTypes = nil file_envoy_admin_v3_clusters_proto_depIdxs = nil }
{ file_envoy_admin_v3_clusters_proto_rawDescOnce.Do(func() { file_envoy_admin_v3_clusters_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_clusters_proto_rawDescData) }) return file_envoy_admin_v3_clusters_proto_rawDescData }
bundlesRequestBuilderGetRequestConfiguration.ts
import {BundlesRequestBuilderGetQueryParameters} from './bundlesRequestBuilderGetQueryParameters'; import {RequestOption} from '@microsoft/kiota-abstractions'; /** Configuration for the request such as headers, query parameters, and middleware options. */ export class
{ /** Request headers */ public headers?: Record<string, string> | undefined; /** Request options */ public options?: RequestOption[] | undefined; /** Request query parameters */ public queryParameters?: BundlesRequestBuilderGetQueryParameters | undefined; }
BundlesRequestBuilderGetRequestConfiguration
quickstart.py
#!/usr/bin/env python # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #$env:GOOGLE_APPLICATION_CREDENTIALS="C:\Users\heinz\OneDrive - Yagora GmbH\Heinz Yagora privat\privat\computer_vision\python-vision\FMCG-Vision-71451fde95cf.json" def run_quickstart(): # [START vision_quickstart] import io import os # Imports the Google Cloud client library # [START vision_python_migration_import] from google.cloud import vision # [END vision_python_migration_import] # Instantiates a client # [START vision_python_migration_client] client = vision.ImageAnnotatorClient() # [END vision_python_migration_client] # The name of the image file to annotate file_name = os.path.abspath('resources/wakeupcat.jpg') # Loads the image into memory with io.open(file_name, 'rb') as image_file: content = image_file.read() image = vision.Image(content=content) # Performs label detection on the image file response = client.label_detection(image=image) labels = response.label_annotations print('Labels:') for label in labels: print(label.description) # [END vision_quickstart]
if __name__ == '__main__': run_quickstart()
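The commented-out PowerShell line near the top of quickstart.py points at how authentication is wired up: the Vision client reads a service-account key from the GOOGLE_APPLICATION_CREDENTIALS environment variable. A minimal sketch of doing the same from Python, with a placeholder path rather than the author's actual key file:

import os

# Placeholder path; point this at a real service-account JSON key.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/path/to/service-account.json"

from google.cloud import vision

client = vision.ImageAnnotatorClient()  # picks up the credentials set above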
__init__.py
import logging import random from typing import List, Tuple import numpy as np from skimage.transform import resize from scipy.ndimage import zoom from toolbox import images from toolbox.images import crop, mask_bbox from .poisson_disk import sample_poisson_uniform logger = logging.getLogger(__name__) class PatchType: S2F_MASKED_BLACK = 'cropped_scaled_to_fit' S2F_MASKED_WHITE = 'cropped_scaled_to_fit_white' S2F = 'scaled_to_fit' RANDOM = 'random2' def sample_poisson_mask(mask, r, k): ymin, ymax, xmin, xmax = mask_bbox(mask) height = ymax - ymin width = xmax - xmin points = np.array(sample_poisson_uniform(height, width, r, k, mask[ymin:ymax, xmin:xmax])) points[:, 0] += ymin points[:, 1] += xmin points = np.floor(points).astype(int) return points def generate_dense_bboxes( mask: np.ndarray, scale=0.23, min_dist=0.091): mask_height, mask_width = mask.shape min_length = min(mask_height, mask_width) patch_sample_size = scale * min_length centers = sample_poisson_mask(mask, min_length * min_dist, 1000) half = int(patch_sample_size / 2) bboxes = [] for center in centers: ycent, xcent = center bbox = (ycent - half, ycent + half + 1, xcent - half, xcent + half + 1) if (bbox[0] >= 0 and bbox[1] < mask_height and bbox[2] >= 0 and bbox[3] < mask_width): bboxes.append(bbox) print('bboxes={} centers={}, mask_size={}, min_dist={}'.format( len(bboxes), len(centers), mask.shape, min_length * min_dist)) return bboxes def random_crops(image, patch_size, num_crops):
def generate_random_bboxes(mask: np.ndarray, scale_range=(1.0, 1.0), num_patches=5, fixed_size=None): """ Generates random bounding boxes at random scales with centroid within the mask. :param mask: The contrained area for the centroid of the patch. :param min_scale: The min scale (multiple of the minimum length of the input mask) of the sampling. :param max_scale: The max scale (multiple of the minimum length of the input mask) of the sampling. :param num_patches: Number of patches to generate. :return: Bounding boxes. """ mask_height, mask_width = mask.shape[:2] min_length = min(mask_height, mask_width) yinds, xinds = np.where(mask) patch_bboxes = [] patch_scales = [] tries = 0 while len(patch_bboxes) < num_patches: scale = random.uniform(*scale_range) patch_scales.append(scale) patch_size = scale * fixed_size if fixed_size else int(scale * min_length) point_idx = np.random.randint(0, len(yinds)) ycent, xcent = yinds[point_idx], xinds[point_idx] half = int(patch_size / 2) # Just squash the patch if it's out of bounds. if (ycent - half < 0 or ycent + half > mask.shape[0] or xcent - half < 0 or xcent + half > mask.shape[1]): if tries < 100: tries += 1 continue bbox = (max(ycent - half, 0), min(ycent + half + 1, mask.shape[0]), max(xcent - half, 0), min(xcent + half + 1, mask.shape[1])) patch_bboxes.append(bbox) return patch_bboxes, patch_scales def bboxes_to_patches(im: np.ndarray, bboxes: List[Tuple[int, int, int, int]], patch_size: int, use_pil=False): """ Converts bounding boxes to actual patches. Patches are all resized to the patch size regardless of the original bounding box size. :param im: To crop patch from. :param bboxes: Boxes defining the patch. :param patch_size: Patch size to return. :return: Image patches. """ patches = [] for bbox in bboxes: cropped = crop(im, bbox) if cropped.shape[0] != patch_size or cropped.shape[1] != patch_size: scale = [patch_size/cropped.shape[0], patch_size/cropped.shape[1]] if len(im.shape) == 3: scale.append(1.0) if use_pil: cropped = resize(cropped, (patch_size, patch_size)) \ .astype(dtype=np.float32) else: cropped = zoom(cropped, scale, im.dtype, order=1) patches.append(cropped) return patches def compute_mask_tight_patch(im: np.ndarray, mask: np.ndarray, patch_size: int): """ Computes a patch which contains all the pixels active in the mask scaled to the patch size. :param im: :param mask: :param patch_size: :return: """ bbox = images.compute_mask_bbox(mask) cropped = images.crop(im, bbox) resized = imresize(cropped, (patch_size, patch_size, cropped.shape[2])) return resized def compute_minmax_thickness(mask): max_width = 0 max_height = 0 for row_id in range(mask.shape[0]): row = mask[row_id, :] split_locs = np.where(np.diff(row) != 0)[0] + 1 for segment in (np.split(row, split_locs)): if segment[0] != 0: max_width = max(max_width, len(segment)) for col_id in range(mask.shape[1]): col = mask[:, col_id] split_locs = np.where(np.diff(col) != 0)[0] + 1 for segment in (np.split(col, split_locs)): if segment[0] != 0: max_height = max(max_height, len(segment)) return min(max_width, max_height), max(max_width, max_height)
    border_mask = np.ones(image.shape[:2], dtype=bool)
    # Integer division so the bounds can be used as slice indices (plain / yields
    # a float under Python 3, which numpy rejects for slicing).
    left = patch_size // 2
    right = image.shape[1] - patch_size // 2
    top = patch_size // 2
    bottom = image.shape[0] - patch_size // 2
    border_mask[:, :left] = False
    border_mask[:, right:] = False
    border_mask[:top, :] = False
    border_mask[bottom:, :] = False
    yinds, xinds = np.where(border_mask)
    bboxes = []
    for i in range(num_crops):
        point_idx = np.random.randint(0, len(yinds))
        ycent, xcent = yinds[point_idx], xinds[point_idx]
        half = int(patch_size / 2)
        # Just squash the patch if it's out of bounds.
        bbox = (ycent - half,
                ycent + half + 1,
                xcent - half,
                xcent + half + 1)
        bboxes.append(bbox)
    return bboxes_to_patches(image, bboxes, patch_size)
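A minimal usage sketch of the patch-sampling helpers above, assuming numpy is installed and that the image, mask and sizes below are purely illustrative:

import numpy as np

image = np.random.rand(256, 256, 3)   # stand-in RGB image
mask = np.zeros((256, 256), dtype=bool)
mask[64:192, 64:192] = True           # restrict patch centres to a square region

bboxes, scales = generate_random_bboxes(mask, scale_range=(0.2, 0.4), num_patches=8)
patches = bboxes_to_patches(image, bboxes, patch_size=64)
# Every patch comes back resized to 64x64 regardless of the sampled bbox size.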
appointement.component.ts
import { Component, OnInit } from '@angular/core'; @Component({ selector: 'app-appointement', templateUrl: './appointement.component.html', styleUrls: ['./appointement.component.css'] })
ngOnInit(): void { } }
export class AppointementComponent implements OnInit { constructor() { }
ProbeHandler.js
/* ###### ###### ####### ###### ####### # # # # # # # # # # # # # # # # # # ###### ###### # # ###### ##### # # # # # # # # # # # # # # # # # # # ####### ###### ####### */ class
{ constructor() { } /* * 获取 Host List */ getProbeHostList(){ let rtn = null; jQuery.ajax({ url: "/host?tag=linux&status=true", type: "GET", dataType: "json", contentType: 'application/json', async:false, complete: function(xhr, textStatus) { }, success: function(data, textStatus, xhr) { rtn = data; }, error: function(xhr, textStatus, errorThrown) { console.log("["+ moment().format("LLL")+"] [" + xhr.status + "] " + xhr.responseJSON.error); } }) return rtn; }; /* * 更新 Host * * 参数: * host = { "host": "string", "iplist": ["string"...], "isagent": 0 | 1, "domain": "string", "hostgroups": ["string"...], "sert": "string", "secret": "string", "config": {"cpu":"80%", "status":"ok"} } */ updateProbeHost(host){ let rtn = 0; jQuery.ajax({ url: "/host", type: "POST", dataType: "json", contentType: 'application/json', data: JSON.stringify(host), async:false, beforeSend: function(xhr) { }, complete: function(xhr, textStatus) { }, success: function(data, textStatus, xhr) { if( _.lowerCase(data.status) == "ok"){ rtn = 1; alertify.success("更新成功 " + host.name + " " + moment().format("LLL")); } else { rtn = 0; alertify.error("更新失败 " + host.name + " " + moment().format("LLL")); } }, error: function(xhr, textStatus, errorThrown) { rtn = 0; console.log("["+ moment().format("LLL")+"] [" + xhr.status + "] " + xhr.responseJSON.error); } }) return rtn; }; /* * 获取 Host By Name */ getProbeHost(name){ let rtn = null; jQuery.ajax({ url: `/host/${name}`, type: "GET", dataType: "json", contentType: 'application/json', async:false, complete: function(xhr, textStatus) { }, success: function(data, textStatus, xhr) { rtn = data; }, error: function(xhr, textStatus, errorThrown) { console.log("["+ moment().format("LLL")+"] [" + xhr.status + "] " + xhr.responseJSON.error); } }) return rtn; }; /* * 删除 Host By Name */ deleteProbeHost(name){ let rtn = 0; jQuery.ajax({ url: `/host/${name}`, type: "GET", dataType: "json", contentType: 'application/json', async:false, complete: function(xhr, textStatus) { }, success: function(data, textStatus, xhr) { if( _.lowerCase(data.status) == "ok"){ rtn = 1; alertify.success("删除成功 " + host.name + " " + moment().format("LLL")); } else { rtn = 0; alertify.error("删除失败 " + host.name + " " + moment().format("LLL")); } }, error: function(xhr, textStatus, errorThrown) { rtn = 0; console.log("["+ moment().format("LLL")+"] [" + xhr.status + "] " + xhr.responseJSON.error); } }) return rtn; }; } var probeHandler = new ProbeHandler();
ProbeHandler
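For reference, a rough Python equivalent of ProbeHandler.updateProbeHost() above. The /host endpoint and the payload fields are taken from the comments in the class; the base URL, the use of the requests library and the field values are assumptions for illustration only:

import requests

host = {
    "host": "web-01",
    "iplist": ["10.0.0.1"],
    "isagent": 1,
    "domain": "example.local",
    "hostgroups": ["linux"],
    "sert": "",
    "secret": "",
    "config": {"cpu": "80%", "status": "ok"},
}
resp = requests.post("http://probe-server/host", json=host)  # hypothetical base URL
print(resp.json().get("status"))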
0001_initial.py
# Generated by Django 3.0.5 on 2020-09-24 09:38 import api.models import datetime from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class
(migrations.Migration): initial = True dependencies = [ ('user', '0002_customer'), ] operations = [ migrations.CreateModel( name='Customers', fields=[ ('type', models.IntegerField(choices=[(1, '采购商'), (2, '供应商'), (3, '采购&供应')], verbose_name='客户类别')), ('name', models.CharField(default='选填', max_length=100, verbose_name='公司名称')), ('lite_name', models.CharField(help_text='如无公司则填联系人或CEO名称', max_length=32, primary_key=True, serialize=False, verbose_name='公司简称')), ('address', models.CharField(default='选填', max_length=200, verbose_name='公司地址')), ('phone', models.CharField(default='选填', max_length=40, verbose_name='公司电话')), ('website', models.URLField(default='https://example.com', verbose_name='网址')), ('business', models.CharField(max_length=64, verbose_name='主营业务')), ('ceo', models.CharField(default='选填', max_length=50, verbose_name='CEO')), ('email', models.EmailField(default='[email protected]', max_length=100, verbose_name='CEO邮箱')), ('ceo_phone', models.CharField(default='选填', max_length=20, verbose_name='CEO电话')), ('contact_name', models.CharField(max_length=100, verbose_name='联系人')), ('contact_email', models.EmailField(max_length=100, verbose_name='联系人邮箱')), ('contact_phone', models.CharField(max_length=50, verbose_name='联系人电话')), ('status', models.IntegerField(choices=[(1, '合作'), (2, '终止'), (3, '开发')], verbose_name='合作状态')), ('line_credits', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='信用额度')), ('input_time', models.DateField(auto_now_add=True, verbose_name='添加日期')), ('text', models.CharField(default='选填', max_length=480, verbose_name='备注')), ('is_delete', models.IntegerField(default=0)), ('sales', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='user.UserInfo', verbose_name='业务')), ], options={ 'verbose_name': '客户信息', 'verbose_name_plural': '客户信息', }, ), migrations.CreateModel( name='OrderCatalog', fields=[ ('order_number', models.CharField(max_length=64, primary_key=True, serialize=False, verbose_name='订单编号')), ('order_date', models.DateField(default=datetime.datetime.now, verbose_name='下单日期')), ('deliver_date', models.DateField(verbose_name='订单交期')), ('input_date', models.DateField(auto_now=True, verbose_name='录入日期')), ('ex_rate', models.DecimalField(decimal_places=4, max_digits=10, verbose_name='汇率')), ('order_amount', models.DecimalField(decimal_places=3, max_digits=20, verbose_name='金额')), ('order_pic', models.CharField(blank=True, max_length=128, null=True)), ('is_done', models.IntegerField(choices=[(0, '正常'), (1, '紧急')], default=0, verbose_name='状态')), ('text', models.CharField(default='选填', max_length=480, verbose_name='备注')), ('ship_addr', models.CharField(default='暂无', max_length=200, verbose_name='出货地点')), ('is_delete', models.IntegerField(default=0)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Customers', verbose_name='客户')), ('sales', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='user.UserInfo', verbose_name='业务')), ], options={ 'verbose_name': '订单目录', 'verbose_name_plural': '订单目录', }, ), migrations.CreateModel( name='OrderModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('number', models.CharField(default=api.models.make_model_number, max_length=64)), ('atr', models.IntegerField(choices=[(0, '自有'), (1, '工厂'), (2, '样品')], default=1)), ('material', models.IntegerField(default=1)), ('size', models.CharField(blank=True, max_length=64, null=True)), 
('construct', models.CharField(blank=True, max_length=64, null=True)), ('pro_date', models.DateField(default=datetime.datetime.now)), ('useful_life', models.IntegerField(blank=True, null=True)), ('price', models.DecimalField(decimal_places=2, max_digits=10)), ('remarks', models.CharField(blank=True, max_length=256, null=True)), ('is_delete', models.IntegerField(default=0)), ], ), migrations.CreateModel( name='SubOrder', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pro_name', models.CharField(max_length=50, verbose_name='产品名称')), ('pro_item', models.IntegerField(choices=[(1, '硅胶'), (2, '五金'), (3, 'USB'), (4, '移动电源'), (5, '其他')], verbose_name='产品类别')), ('pro_size', models.CharField(default=0, max_length=64, verbose_name='产品尺寸')), ('pro_color', models.CharField(blank=True, max_length=1024)), ('pro_pack', models.CharField(default='无', max_length=64, verbose_name='产品包装')), ('pro_desc', models.CharField(max_length=400, verbose_name='详细描述')), ('pro_qt', models.DecimalField(decimal_places=1, max_digits=20, verbose_name='数量(个)')), ('pro_price', models.DecimalField(decimal_places=3, max_digits=20, verbose_name='单价($)')), ('pro_weight', models.DecimalField(decimal_places=1, max_digits=20, verbose_name='单重(g)')), ('sub_amount', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='订单金额($)')), ('sub_input_date', models.DateField(auto_now=True, verbose_name='录入日期')), ('is_delete', models.IntegerField(default=0)), ('is_purchase', models.IntegerField(default=0)), ('is_ship', models.IntegerField(default=0)), ('is_account', models.IntegerField(default=0)), ('is_reconciliation', models.IntegerField(default=0)), ('status', models.IntegerField(default=1)), ('order_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_orders', to='api.OrderCatalog', verbose_name='订单编号')), ('sales', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='user.UserInfo', verbose_name='业务')), ], options={ 'verbose_name': '订单明细', 'verbose_name_plural': '订单明细', }, ), migrations.CreateModel( name='ShipOrder', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ship_plan', models.IntegerField(choices=[('国际快递', ((1, 'UPS'), (2, 'FedEx'), (3, 'DHL'))), (4, '国内快递'), (5, '船运'), (6, '自提'), (7, '第三方'), (8, '其他')], verbose_name='出货方式')), ('ship_number', models.CharField(max_length=50, verbose_name='出货单号')), ('ship_date', models.DateField(default=datetime.datetime.now, verbose_name='出货日期')), ('ship_cost', models.DecimalField(decimal_places=4, max_digits=20, verbose_name='出货费用(¥)')), ('ship_weight', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='重量(kg)')), ('text', models.CharField(default='选填', max_length=480, verbose_name='备注')), ('destination', models.CharField(default='American', max_length=32)), ('is_delete', models.IntegerField(default=0)), ('input_date', models.DateTimeField(auto_now_add=True)), ('sales', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='user.UserInfo', verbose_name='业务')), ('ship_customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Customers', verbose_name='货代')), ], options={ 'verbose_name': '出货记录', 'verbose_name_plural': '出货记录', }, ), migrations.CreateModel( name='ShipDetail', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ship_cost', 
models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='出货费用(¥)')), ('ship_weight', models.DecimalField(decimal_places=2, default=0, max_digits=8, verbose_name='出货重量(kg)')), ('is_delete', models.IntegerField(default=0)), ('ship_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.ShipOrder', verbose_name='出货单号')), ('sub_order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ships', to='api.SubOrder', verbose_name='订单明细')), ], options={ 'verbose_name': '出货明细', 'verbose_name_plural': '出货明细', }, ), migrations.CreateModel( name='PurchaseOrder', fields=[ ('purchase_date', models.DateField(default=datetime.datetime.now, verbose_name='采购日期')), ('deliver_date', models.DateField(verbose_name='采购交期')), ('purchase_number', models.CharField(max_length=50, primary_key=True, serialize=False, verbose_name='采购单号')), ('input_date', models.DateTimeField(auto_now_add=True)), ('text', models.CharField(default='选填', max_length=400, verbose_name='备注')), ('is_delete', models.IntegerField(default=0)), ('purchaser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Customers', verbose_name='供应商')), ('sales', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='user.UserInfo', verbose_name='业务')), ], options={ 'verbose_name': '采购订单', 'verbose_name_plural': '采购订单', }, ), migrations.CreateModel( name='PurchaseDetail', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('purchase_price', models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='采购单价(¥)')), ('purchase_qt', models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='采购数量(个)')), ('purchase_amount', models.DecimalField(decimal_places=4, default=0, max_digits=20, verbose_name='采购金额($)')), ('text', models.CharField(default='选填', max_length=400, verbose_name='备注')), ('is_delete', models.IntegerField(default=0)), ('purchase_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.PurchaseOrder', verbose_name='采购单号')), ('sub_order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='purchases', to='api.SubOrder', verbose_name='订单明细')), ], options={ 'verbose_name': '采购明细', 'verbose_name_plural': '采购明细', }, ), migrations.CreateModel( name='OrderToModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sale_price', models.DecimalField(blank=True, decimal_places=3, max_digits=15, null=True)), ('input_date', models.DateTimeField(default=django.utils.timezone.now)), ('is_delete', models.IntegerField(default=0)), ('model', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.OrderModel')), ('order_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.OrderCatalog')), ], ), migrations.AddField( model_name='ordermodel', name='order_number', field=models.ManyToManyField(through='api.OrderToModel', to='api.OrderCatalog'), ), migrations.AddField( model_name='ordermodel', name='supplier', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='customer_model', to='api.Customers'), ), migrations.CreateModel( name='CustomerAddr', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('addr_type', models.IntegerField(choices=[(1, 'other'), (0, 'common')], default=1)), ('linkman', models.CharField(blank=True, 
max_length=64, null=True)), ('postcode', models.CharField(blank=True, max_length=10, null=True)), ('country', models.CharField(blank=True, max_length=64, null=True)), ('city', models.CharField(blank=True, max_length=64, null=True)), ('addr', models.CharField(blank=True, max_length=256, null=True)), ('input_date', models.DateField(auto_now=True)), ('is_delete', models.IntegerField(default=0)), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Customers')), ], ), ]
Migration
conanfile.py
import glob import os from conans import ConanFile, tools, AutoToolsBuildEnvironment, VisualStudioBuildEnvironment class LibxsltConan(ConanFile): name = "libxslt" url = "https://github.com/conan-io/conan-center-index" description = "libxslt is a software library implementing XSLT processor, based on libxml2" topics = ("XSLT", "processor") homepage = "https://xmlsoft.org" license = "MIT" settings = "os", "arch", "compiler", "build_type" default_options = {'shared': False, 'fPIC': True, "debugger": False, "crypto": False, "profiler": False, "plugins": False} options = {name: [True, False] for name in default_options.keys()} _option_names = [name for name in default_options.keys() if name not in ["shared", "fPIC"]] _source_subfolder = "source_subfolder" exports_sources = "patches/**" def requirements(self): self.requires("libxml2/2.9.10") @property def _is_msvc(self): return self.settings.compiler == 'Visual Studio' @property def _full_source_subfolder(self):
def source(self): tools.get(**self.conan_data["sources"][self.version]) os.rename("libxslt-{0}".format(self.version), self._source_subfolder) def config_options(self): if self.settings.os == "Windows": del self.options.fPIC def configure(self): del self.settings.compiler.libcxx del self.settings.compiler.cppstd def _patch_sources(self): for patch in self.conan_data["patches"][self.version]: tools.patch(**patch) def build(self): self._patch_sources() if self._is_msvc: self._build_windows() else: self._build_with_configure() def _build_windows(self): with tools.chdir(os.path.join(self._full_source_subfolder, 'win32')): debug = "yes" if self.settings.build_type == "Debug" else "no" static = "no" if self.options.shared else "yes" with tools.vcvars(self.settings): args = ["cscript", "configure.js", "compiler=msvc", "prefix=%s" % self.package_folder, "cruntime=/%s" % self.settings.compiler.runtime, "debug=%s" % debug, "static=%s" % static, 'include="%s"' % ";".join(self.deps_cpp_info.include_paths), 'lib="%s"' % ";".join(self.deps_cpp_info.lib_paths), 'iconv=no', 'xslt_debug=no'] for name in self._option_names: cname = {"plugins": "modules"}.get(name, name) value = getattr(self.options, name) value = "yes" if value else "no" args.append("%s=%s" % (cname, value)) configure_command = ' '.join(args) self.output.info(configure_command) self.run(configure_command) # Fix library names because they can be not just zlib.lib def format_libs(package): libs = [] for lib in self.deps_cpp_info[package].libs: libname = lib if not libname.endswith('.lib'): libname += '.lib' libs.append(libname) for lib in self.deps_cpp_info[package].system_libs: libname = lib if not libname.endswith('.lib'): libname += '.lib' libs.append(libname) return ' '.join(libs) def fix_library(option, package, old_libname): if option: tools.replace_in_file("Makefile.msvc", "LIBS = %s" % old_libname, "LIBS = %s" % format_libs(package)) if "icu" in self.deps_cpp_info.deps: fix_library(True, 'icu', 'wsock32.lib') tools.replace_in_file("Makefile.msvc", "libxml2.lib", format_libs("libxml2")) tools.replace_in_file("Makefile.msvc", "libxml2_a.lib", format_libs("libxml2")) with tools.environment_append(VisualStudioBuildEnvironment(self).vars): self.run("nmake /f Makefile.msvc install") def _build_with_configure(self): env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) full_install_subfolder = tools.unix_path(self.package_folder) # fix rpath if self.settings.os == "Macos": tools.replace_in_file(os.path.join(self._full_source_subfolder, "configure"), r"-install_name \$rpath/", "-install_name ") configure_args = ['--with-python=no', '--prefix=%s' % full_install_subfolder] if self.options.shared: configure_args.extend(['--enable-shared', '--disable-static']) else: configure_args.extend(['--enable-static', '--disable-shared']) xml_config = tools.unix_path(self.deps_cpp_info["libxml2"].rootpath) + "/bin/xml2-config" configure_args.append('XML_CONFIG=%s' % xml_config) for name in self._option_names: value = getattr(self.options, name) value = ("--with-%s" % name) if value else ("--without-%s" % name) configure_args.append(value) # Disable --build when building for iPhoneSimulator. The configure script halts on # not knowing if it should cross-compile. 
build = None if self.settings.os == "iOS" and self.settings.arch == "x86_64": build = False env_build.configure(args=configure_args, build=build, configure_dir=self._full_source_subfolder) env_build.make(args=["install", "V=1"]) def package(self): self.copy("COPYING", src=self._full_source_subfolder, dst="licenses", ignore_case=True, keep_path=False) tools.rmdir(os.path.join(self.package_folder, "share")) tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig")) if self.settings.os == "Windows": # There is no way to avoid building the tests, but at least we don't want them in the package for prefix in ["run", "test"]: for test in glob.glob("%s/bin/%s*" % (self.package_folder, prefix)): os.remove(test) if self.settings.compiler == "Visual Studio": if self.settings.build_type == "Debug": os.unlink(os.path.join(self.package_folder, "bin", "libexslt.pdb")) os.unlink(os.path.join(self.package_folder, "bin", "libxslt.pdb")) os.unlink(os.path.join(self.package_folder, "bin", "xsltproc.pdb")) if self.options.shared: os.unlink(os.path.join(self.package_folder, "lib", "libxslt_a.lib")) os.unlink(os.path.join(self.package_folder, "lib", "libexslt_a.lib")) else: os.unlink(os.path.join(self.package_folder, "lib", "libxslt.lib")) os.unlink(os.path.join(self.package_folder, "lib", "libexslt.lib")) os.unlink(os.path.join(self.package_folder, "bin", "libxslt.dll")) os.unlink(os.path.join(self.package_folder, "bin", "libexslt.dll")) for f in "libxslt.la", "libexslt.la": la = os.path.join(self.package_folder, 'lib', f) if os.path.isfile(la): os.unlink(la) def package_info(self): self.cpp_info.libs = ['exslt', 'xslt'] if self._is_msvc: if self.options.shared: self.cpp_info.libs = ['lib%s' % l for l in self.cpp_info.libs] else: self.cpp_info.libs = ['lib%s_a' % l for l in self.cpp_info.libs] self.cpp_info.includedirs.append(os.path.join("include", "libxslt")) if self.settings.os == "Linux" or self.settings.os == "Macos": self.cpp_info.system_libs.append('m') if self.settings.os == "Windows": self.cpp_info.system_libs.append('ws2_32')
return os.path.join(self.source_folder, self._source_subfolder)
iterator.rs
// Copyright 2019 Fullstop000 <[email protected]>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. use crate::util::comparator::Comparator; use crate::{Error, Result}; use std::cmp::Ordering; /// A common trait for iterating all the key/value entries. /// /// An `Iterator` must be invalid once created pub trait Iterator { /// An iterator is either positioned at a key/value pair, or /// not valid. This method returns true iff the iterator is valid. fn valid(&self) -> bool; /// Position at the first key in the source. The iterator is Valid() /// after this call iff the source is not empty. fn seek_to_first(&mut self); /// Position at the last key in the source. The iterator is /// Valid() after this call iff the source is not empty. fn seek_to_last(&mut self); /// Position at the first key in the source that is at or past target. /// The iterator is valid after this call iff the source contains /// an entry that comes at or past target. fn seek(&mut self, target: &[u8]); /// Moves to the next entry in the source. After this call, the iterator is /// valid iff the iterator was not positioned at the last entry in the source. /// REQUIRES: `valid()` fn next(&mut self); /// Moves to the previous entry in the source. After this call, the iterator /// is valid iff the iterator was not positioned at the first entry in source. /// REQUIRES: `valid()` fn prev(&mut self); /// Return the key for the current entry. The underlying storage for /// the returned slice is valid only until the next modification of /// the iterator. /// REQUIRES: `valid()` fn key(&self) -> &[u8]; /// Return the value for the current entry. The underlying storage for /// the returned slice is valid only until the next modification of /// the iterator. /// REQUIRES: `valid()` fn value(&self) -> &[u8]; /// If an error has occurred, return it. Else return an ok status. fn status(&mut self) -> Result<()>; } /// A concatenated iterator contains an original iterator `origin` and a `DerivedIterFactory`. /// New derived iterator is generated by `factory(origin.value())`. /// The origin Iterator should yield out the last key but not the first. /// This is just like a bucket iterator with lazy generator. pub struct ConcatenateIterator<I: Iterator, F: DerivedIterFactory> { origin: I, factory: F, derived: Option<F::Iter>, prev_derived_value: Vec<u8>, err: Option<Error>, } /// A factory that takes value from the origin and pub trait DerivedIterFactory { type Iter: Iterator; /// Create a new `Iterator` based on value yield by original `Iterator` fn derive(&self, value: &[u8]) -> Result<Self::Iter>; }
Self { origin, factory, derived: None, prev_derived_value: vec![], err: None, } } #[inline] fn maybe_save_err(old: &mut Option<Error>, new: Result<()>) { if old.is_none() { if let Err(e) = new { *old = Some(e); } } } // Create a derived iter from the current value of the origin iter. // Only works when current derived iter is `None` or the previous origin value has been changed. // Same as `InitDataBlock` in C++ implementation fn init_derived_iter(&mut self) { if !self.origin.valid() { self.derived = None } else { let v = self.origin.value(); if self.derived.is_none() || v.cmp(self.prev_derived_value.as_slice()) != Ordering::Equal { match self.factory.derive(v) { Ok(derived) => { if derived.valid() { self.prev_derived_value = v.to_vec(); } self.set_derived(Some(derived)) } Err(e) => { Self::maybe_save_err(&mut self.err, Err(e)); self.set_derived(None); } } } } } // Same as `SetDataIterator` in C++ implementation #[inline] fn set_derived(&mut self, iter: Option<F::Iter>) { if let Some(iter) = &mut self.derived { Self::maybe_save_err(&mut self.err, iter.status()) } self.derived = iter } // Skip invalid results util finding a valid derived iter by `next()` // If found, set derived iter to the first fn skip_forward(&mut self) { while self.derived.is_none() || !self.derived.as_ref().unwrap().valid() { if !self.origin.valid() { self.set_derived(None); break; } else { self.origin.next(); self.init_derived_iter(); if let Some(i) = &mut self.derived { // init to the first i.seek_to_first(); } } } } // Skip invalid results util finding a valid derived iter by `prev()` // If found, set derived iter to the last fn skip_backward(&mut self) { while self.derived.is_none() || !self.derived.as_ref().unwrap().valid() { if !self.origin.valid() { self.set_derived(None); break; } else { self.origin.prev(); self.init_derived_iter(); if let Some(i) = &mut self.derived { // init to the last i.seek_to_last(); } } } } #[inline] fn valid_or_panic(&self) { assert!( self.valid(), "[concatenated iterator] invalid derived iterator" ) } } impl<I: Iterator, F: DerivedIterFactory> Iterator for ConcatenateIterator<I, F> { fn valid(&self) -> bool { if let Some(e) = &self.err { error!("[concatenated iter] Error: {:?}", e); return false; } if let Some(di) = &self.derived { di.valid() } else { false } } fn seek_to_first(&mut self) { self.origin.seek_to_first(); self.init_derived_iter(); if let Some(di) = self.derived.as_mut() { di.seek_to_first(); } // scan forward util finding the first valid entry self.skip_forward(); } fn seek_to_last(&mut self) { self.origin.seek_to_last(); self.init_derived_iter(); if let Some(di) = self.derived.as_mut() { di.seek_to_last() } // scan backward util finding the first valid entry self.skip_backward(); } fn seek(&mut self, target: &[u8]) { self.origin.seek(target); self.init_derived_iter(); if let Some(di) = self.derived.as_mut() { di.seek(target) } self.skip_forward(); } fn next(&mut self) { self.valid_or_panic(); self.derived.as_mut().map_or((), |di| di.next()); self.skip_forward(); } fn prev(&mut self) { self.valid_or_panic(); self.derived.as_mut().map_or((), |di| di.prev()); self.skip_backward(); } fn key(&self) -> &[u8] { self.valid_or_panic(); self.derived.as_ref().unwrap().key() } fn value(&self) -> &[u8] { self.valid_or_panic(); self.derived.as_ref().unwrap().value() } fn status(&mut self) -> Result<()> { self.origin.status()?; if let Some(di) = self.derived.as_mut() { di.status()? 
}; if let Some(e) = self.err.take() { return Err(e); } Ok(()) } } #[derive(Eq, PartialEq)] enum IterDirection { Forward, Reverse, } pub struct KMergeIter<T: KMergeCore> { core: T, current: usize, direction: IterDirection, } impl<T: KMergeCore> KMergeIter<T> { pub fn new(core: T) -> Self { let current = core.iters_len(); Self { core, current, direction: IterDirection::Forward, } } } /// An trait defines the operation in k merge sort pub trait KMergeCore { type Cmp: Comparator; /// Returns current comparator fn cmp(&self) -> &Self::Cmp; /// The inner child iterators size fn iters_len(&self) -> usize; /// Updates the smallest if given `iter` has a smaller value and returns true. /// Otherwise returns false. fn smaller<'a>(&self, smallest: &mut Option<&'a [u8]>, iter: &'a dyn Iterator) -> bool { if iter.valid() && (smallest.is_none() || self.cmp().compare(iter.key(), smallest.as_ref().unwrap()) == Ordering::Less) { *smallest = Some(iter.key()); true } else { false } } /// Updates the smallest if given `iter` has a smaller value and returns true. /// Otherwise returns false. fn larger<'a>(&self, largest: &mut Option<&'a [u8]>, iter: &'a dyn Iterator) -> bool { if iter.valid() && (largest.is_none() || self.cmp().compare(iter.key(), largest.as_ref().unwrap()) == Ordering::Greater) { *largest = Some(iter.key()); true } else { false } } /// Find the iterator with the smallest 'key' and set it as current fn find_smallest(&mut self) -> usize; /// Find the iterator with the largest 'key' and set it as current fn find_largest(&mut self) -> usize; /// Returns an immutable borrow of ith child iterator fn get_child(&self, i: usize) -> &dyn Iterator; /// Returns a mutable borrow of ith child iterator fn get_child_mut(&mut self, i: usize) -> &mut dyn Iterator; /// Iterate each child iterator and call `f` fn for_each_child<F>(&mut self, f: F) where F: FnMut(&mut dyn Iterator); /// Iterate each child iterator except the ith iterator and call `f` fn for_not_ith<F>(&mut self, i: usize, f: F) where F: FnMut(&mut dyn Iterator, &Self::Cmp); /// Returns `Err` if inner children has errors. 
fn take_err(&mut self) -> Result<()>; } impl<T: KMergeCore> Iterator for KMergeIter<T> { fn valid(&self) -> bool { let i = self.current; if i < self.core.iters_len() { self.core.get_child(self.current).valid() } else { false } } fn seek_to_first(&mut self) { self.core.for_each_child(|i| i.seek_to_first()); self.current = self.core.find_smallest(); self.direction = IterDirection::Forward; } fn seek_to_last(&mut self) { self.core.for_each_child(|i| i.seek_to_last()); self.current = self.core.find_largest(); self.direction = IterDirection::Reverse; } fn seek(&mut self, target: &[u8]) { self.core.for_each_child(|i| i.seek(target)); self.current = self.core.find_smallest(); self.direction = IterDirection::Forward; } fn next(&mut self) { if self.direction != IterDirection::Forward { let key = self.key().to_vec(); self.core.for_not_ith(self.current, |child, cmp| { child.seek(&key); if child.valid() && cmp.compare(&key, child.key()) == Ordering::Equal { child.next(); } }); self.direction = IterDirection::Forward; } self.core.get_child_mut(self.current).next(); self.current = self.core.find_smallest(); } fn prev(&mut self) { if self.direction != IterDirection::Reverse { let key = self.key().to_vec(); self.core.for_not_ith(self.current, |child, _| { child.seek(&key); if child.valid() { child.prev(); } else { // Child has no key >= current key so point to the last child.seek_to_last(); } }); self.direction = IterDirection::Reverse; } self.core.get_child_mut(self.current).prev(); self.current = self.core.find_largest(); } fn key(&self) -> &[u8] { self.core.get_child(self.current).key() } fn value(&self) -> &[u8] { self.core.get_child(self.current).value() } fn status(&mut self) -> Result<()> { self.core.take_err() } } #[cfg(test)] mod tests { use crate::iterator::*; use crate::rand::Rng; use crate::util::comparator::BytewiseComparator; use crate::Result; use std::cmp::Ordering; use std::str; /// An helper to merge several `I` in merge iterating style struct SimpleKMerger<I: Iterator, C: Comparator> { cmp: C, children: Vec<I>, } impl<I: Iterator, C: Comparator> KMergeCore for SimpleKMerger<I, C> { type Cmp = C; fn cmp(&self) -> &Self::Cmp { &self.cmp } fn iters_len(&self) -> usize { self.children.len() } fn find_smallest(&mut self) -> usize { let mut smallest: Option<&[u8]> = None; let mut index = self.iters_len(); for (i, child) in self.children.iter().enumerate() { if self.smaller(&mut smallest, child) { index = i } } index } fn find_largest(&mut self) -> usize { let mut largest: Option<&[u8]> = None; let mut index = self.iters_len(); for (i, child) in self.children.iter().enumerate() { if self.larger(&mut largest, child) { index = i } } index } fn get_child(&self, i: usize) -> &dyn Iterator { self.children.get(i).unwrap() as &dyn Iterator } fn get_child_mut(&mut self, i: usize) -> &mut dyn Iterator { self.children.get_mut(i).unwrap() as &mut dyn Iterator } fn for_each_child<F>(&mut self, mut f: F) where F: FnMut(&mut dyn Iterator), { self.children .iter_mut() .for_each(|i| f(i as &mut dyn Iterator)); } fn for_not_ith<F>(&mut self, n: usize, mut f: F) where F: FnMut(&mut dyn Iterator, &Self::Cmp), { for (i, child) in self.children.iter_mut().enumerate() { if i != n { f(child as &mut dyn Iterator, &self.cmp) } } } fn take_err(&mut self) -> Result<()> { for i in self.children.iter_mut() { let status = i.status(); if status.is_err() { return status; } } Ok(()) } } // Divide given ordered `src` into `n` lists and then construct a `MergingIterator` with them fn new_test_merging_iter( mut src: Vec<String>, 
n: usize, ) -> KMergeIter<SimpleKMerger<TestSimpleArrayIter, BytewiseComparator>> { let mut children = vec![]; for _ in 0..n { children.push(vec![]); } src.sort(); let mut rnd = rand::thread_rng(); // Separate value into all children randomly for v in src { let i = rnd.gen_range(0, n); let child = children.get_mut(i).unwrap(); child.push(v); } let cmp = BytewiseComparator::default(); let iters = children .drain(..) .map(|mut child| { child.sort(); TestSimpleArrayIter::new(child) }) .collect::<Vec<_>>(); KMergeIter::new(SimpleKMerger { cmp, children: iters, }) } struct SortedIterTestSuite<O: Iterator, S: Iterator> { origin: O, // A sorted array based iterator shadow: S, // The iterator to be tested } impl<O: Iterator, S: Iterator> SortedIterTestSuite<O, S> { fn new(origin: O, shadow: S) -> Self { Self { origin, shadow } } fn assert_valid(&self, expect: bool) { assert_eq!(self.origin.valid(), expect); assert_eq!(self.origin.valid(), self.shadow.valid()); } fn assert_key_and_value(&self) { assert_eq!(self.origin.key(), self.shadow.key()); assert_eq!(self.origin.value(), self.shadow.value()); } fn assert_iter_forward(&mut self) { self.seek_to_first(); while self.valid() { self.assert_key_and_value(); self.next(); } self.assert_valid(false); } fn assert_iter_backward(&mut self) { self.seek_to_last(); while self.valid() { self.assert_key_and_value(); self.prev(); } self.assert_valid(false); } } impl<O: Iterator, S: Iterator> Iterator for SortedIterTestSuite<O, S> { fn valid(&self) -> bool { self.origin.valid() && self.shadow.valid() } fn seek_to_first(&mut self) { self.origin.seek_to_first(); self.shadow.seek_to_first(); } fn seek_to_last(&mut self) { self.origin.seek_to_last(); self.shadow.seek_to_last(); } fn seek(&mut self, target: &[u8]) { self.origin.seek(target); self.shadow.seek(target); } fn next(&mut self) { self.origin.next(); self.shadow.next(); } fn prev(&mut self) { self.origin.prev(); self.shadow.prev(); } fn key(&self) -> &[u8] { unimplemented!() } fn value(&self) -> &[u8] { unimplemented!() } fn status(&mut self) -> Result<()> { unimplemented!() } } #[derive(Debug)] struct TestSimpleArrayIter { inner: Vec<String>, current: usize, } impl TestSimpleArrayIter { fn new(mut inner: Vec<String>) -> Self { inner.sort(); let current = inner.len(); Self { inner, current } } fn valid_or_panic(&self) { if !self.valid() { panic!("Invalid iterator {:?}", &self) } } } impl Iterator for TestSimpleArrayIter { fn valid(&self) -> bool { self.current < self.inner.len() && self.inner.len() > 0 } fn seek_to_first(&mut self) { self.current = 0; } fn seek_to_last(&mut self) { if self.inner.len() > 0 { self.current = self.inner.len() - 1 } } fn seek(&mut self, target: &[u8]) { let mut current = self.inner.len() + 1; for (i, s) in self.inner.iter().enumerate() { match s.as_bytes().cmp(target) { Ordering::Equal | Ordering::Greater => { current = i; break; } _ => continue, } } self.current = current; } fn next(&mut self) { self.valid_or_panic(); self.current += 1; } fn prev(&mut self) { self.valid_or_panic(); if self.current > 0 { self.current -= 1 } else { // marked as invalid self.current = self.inner.len() } } fn key(&self) -> &[u8] { self.valid_or_panic(); self.inner[self.current].as_bytes() } fn value(&self) -> &[u8] { self.key() } fn status(&mut self) -> Result<()> { Ok(()) } } struct SimpleDeriveFactory {} impl SimpleDeriveFactory { fn new() -> Self { Self {} } } impl DerivedIterFactory for SimpleDeriveFactory { type Iter = TestSimpleArrayIter; fn derive(&self, value: &[u8]) -> Result<Self::Iter> { 
let c = str::from_utf8(value) .unwrap() .chars() .nth(0) .unwrap() .to_string(); let inner = vec![c.clone(), c.as_str().repeat(2), c.as_str().repeat(3)]; Ok(TestSimpleArrayIter::new(inner)) } } #[test] fn test_concatenated_iterator() { // inner: [a, aa, aaa, b, bb, bbb, c, cc, ccc] let mut iter = ConcatenateIterator::new( TestSimpleArrayIter::new(vec!["aaa".to_owned(), "bbb".to_owned(), "ccc".to_owned()]), SimpleDeriveFactory::new(), ); assert!(!iter.valid()); iter.seek_to_first(); assert_eq!(str::from_utf8(iter.key()).unwrap(), "a"); assert_eq!(str::from_utf8(iter.value()).unwrap(), "a"); iter.next(); assert_eq!(str::from_utf8(iter.key()).unwrap(), "aa"); iter.seek_to_last(); assert_eq!(str::from_utf8(iter.key()).unwrap(), "ccc"); iter.prev(); assert_eq!(str::from_utf8(iter.key()).unwrap(), "cc"); iter.seek_to_first(); iter.seek("b".as_bytes()); assert_eq!(str::from_utf8(iter.key()).unwrap(), "b"); iter.seek("bb".as_bytes()); assert_eq!(str::from_utf8(iter.key()).unwrap(), "bb"); iter.seek("bbbb".as_bytes()); assert_eq!(str::from_utf8(iter.key()).unwrap(), "c"); // Test seeking out of range iter.seek("1".as_bytes()); assert_eq!(str::from_utf8(iter.key()).unwrap(), "a"); iter.seek("d".as_bytes()); assert!(!iter.valid()); } #[test] fn test_merging_iterator() { let mut input = vec![]; for i in 1..100 { input.push(i.to_string()); } input.sort(); let tests = vec![1, 5, 10, 50]; for t in tests { let merging_iter = new_test_merging_iter(input.clone(), t); let origin = TestSimpleArrayIter::new(input.clone()); let mut suite = SortedIterTestSuite::new(origin, merging_iter); suite.assert_valid(false); suite.seek_to_first(); suite.assert_key_and_value(); suite.seek_to_last(); suite.assert_key_and_value(); suite.seek("3".as_bytes()); suite.assert_key_and_value(); suite.prev(); suite.assert_key_and_value(); suite.next(); suite.assert_key_and_value(); suite.seek("0".as_bytes()); suite.assert_key_and_value(); suite.seek("9999".as_bytes()); suite.assert_valid(false); suite.assert_iter_forward(); suite.assert_iter_backward(); } } }
impl<I: Iterator, F: DerivedIterFactory> ConcatenateIterator<I, F> { pub fn new(origin: I, factory: F) -> Self {
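The two-level iteration idea behind ConcatenateIterator (an outer iterator yields values, a factory derives an inner iterator from each value, and entries are produced lazily from the current inner iterator) can be sketched compactly in Python; this mirrors the intent, not the exact seek/prev semantics of the Rust trait:

def concatenate(origin, derive):
    # Lazily walk the outer sequence, deriving and draining one inner iterator at a time.
    for value in origin:
        for item in derive(value):
            yield item

# Matches the behaviour exercised in test_concatenated_iterator above:
# list(concatenate(["aaa", "bbb"], lambda v: [v[0], v[0] * 2, v[0] * 3]))
# -> ['a', 'aa', 'aaa', 'b', 'bb', 'bbb']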
http.rs
use crate::parser::{before, bytes, exact, repeat, single, Applicator, Matcher, unit, ParserExt}; use crate::stream::ByteStream; use std::ops::Add; pub fn as_string(bytes: Vec<u8>) -> String { // Consider changing to: std::str::from_utf8(&[u8]) -> Result<&str> // Note: from_utf8 can fail for invalid UTF-8 codes // Line below won't fail, but will provide incorrect result bytes.into_iter().map(|b| b as char).collect::<String>() } #[derive(Debug)] pub struct Header { pub name: String, pub value: String, } fn header_parser() -> impl Matcher<Header> { unit(|| vec![]) .then(before(':')) .map(|(mut vec, val)| { vec.push(as_string(val)); vec }) .then(single(':')) .map(|(vec, _)| vec)
.map(|(vec, _)| vec) .then(before('\r')) .map(|(mut vec, val)| { vec.push(as_string(val)); vec }) .then(exact(&[b'\r', b'\n'])) .map(|(vec, _)| vec) .map(|vec| Header { name: vec[0].to_owned(), value: vec[1].to_owned(), }) } #[derive(Debug, Default)] pub struct Request { pub method: String, pub path: String, pub protocol: String, pub headers: Vec<Header>, pub content: Vec<u8>, } #[derive(Debug)] pub struct Response { pub protocol: String, pub code: u16, pub message: String, pub headers: Vec<Header>, pub content: Vec<u8>, } impl Into<String> for Response { fn into(self) -> String { let headers = self .headers .into_iter() .map(|h| format!("{}: {}", h.name, h.value)) .collect::<Vec<String>>() .join("\r\n"); let content = as_string(self.content); format!("{} {} {}\r\n", self.protocol, self.code, self.message) .add(&headers) .add("\r\n\r\n") .add(&content) } } fn request_parser() -> impl Matcher<Request> { unit(|| Request::default()) .then(before(' ')) .save(|req, bytes| req.method = as_string(bytes)) .then(single(' ')) .skip() .then(before(' ')) .save(|req, bytes| req.path = as_string(bytes)) .then(single(' ')) .skip() .then(before('\r')) .save(|req, bytes| req.protocol = as_string(bytes)) .then(exact(&[b'\r', b'\n'])) .skip() .then(repeat(header_parser())) .save(|req, vec| req.headers = vec) .then(exact(&[b'\r', b'\n'])) .skip() .then_with(|req| { let n: usize = get_content_length(req).unwrap_or(0); bytes(n) }) .save(|req, content| req.content = content) } fn get_header_value(req: &Request, name: String) -> Option<String> { req.headers .iter() .find(|h| h.name == name) .map(|h| h.value.clone()) } fn get_content_length(req: &Request) -> Option<usize> { get_header_value(req, "Content-Length".to_string()) .map(|len| len.parse::<usize>().unwrap_or(0)) } pub fn parse_http_request(stream: &mut ByteStream) -> Option<Request> { stream .apply(request_parser()) .map(|r| Some(r)) .unwrap_or_else(|_| None) } #[cfg(test)] mod tests { use super::*; #[test] fn curl_request() { let text = "GET / HTTP/1.1\r\nHost: localhost:9000\r\nUser-Agent: curl/7.64.1\r\nAccept: */*\r\n\r\n"; let mut bs: ByteStream = text.to_string().into(); let req_opt = parse_http_request(&mut bs); let req = req_opt.unwrap(); assert_eq!(req.method, "GET"); assert_eq!(req.path, "/"); assert_eq!(req.protocol, "HTTP/1.1"); assert_eq!(req.headers[0].name, "Host"); assert_eq!(req.headers[0].value, "localhost:9000"); assert_eq!(req.headers[1].name, "User-Agent"); assert_eq!(req.headers[1].value, "curl/7.64.1"); assert_eq!(req.headers[2].name, "Accept"); assert_eq!(req.headers[2].value, "*/*"); assert!(req.content.is_empty()); } #[test] fn http_request() { let text = "GET /docs/index.html HTTP/1.1\r\nHost: www.nowhere123.com\r\nAccept: image/gif, image/jpeg, */*\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate\r\nContent-Length: 8\r\nUser-Agent: Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)\r\n\r\n0123456\n"; let mut bs: ByteStream = text.to_string().into(); let req_opt = parse_http_request(&mut bs); let req = req_opt.unwrap(); assert_eq!(req.method, "GET"); assert_eq!(req.path, "/docs/index.html"); assert_eq!(req.protocol, "HTTP/1.1"); assert_eq!(req.content, b"0123456\n"); assert_eq!(req.headers[0].name, "Host"); assert_eq!(req.headers[0].value, "www.nowhere123.com"); assert_eq!(req.headers[1].name, "Accept"); assert_eq!(req.headers[1].value, "image/gif, image/jpeg, */*"); assert_eq!(req.headers[2].name, "Accept-Language"); assert_eq!(req.headers[2].value, "en-us"); assert_eq!(req.headers[3].name, "Accept-Encoding"); 
assert_eq!(req.headers[3].value, "gzip, deflate"); assert_eq!(req.headers[4].name, "Content-Length"); assert_eq!(req.headers[4].value, "8"); assert_eq!(req.headers[5].name, "User-Agent"); assert_eq!( req.headers[5].value, "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)" ); } #[test] fn http_upgrade() { let text = "GET /chat HTTP/1.1\r\nHost: example.com:8000\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\nSec-WebSocket-Version: 13\r\n\r\n"; let mut bs: ByteStream = text.to_string().into(); let req_opt = bs.apply(request_parser()); let req = req_opt.unwrap(); assert_eq!(req.method, "GET"); assert_eq!(req.path, "/chat"); assert_eq!(req.protocol, "HTTP/1.1"); assert!(req.content.is_empty()); assert_eq!(req.headers[0].name, "Host"); assert_eq!(req.headers[0].value, "example.com:8000"); assert_eq!(req.headers[1].name, "Upgrade"); assert_eq!(req.headers[1].value, "websocket"); assert_eq!(req.headers[2].name, "Connection"); assert_eq!(req.headers[2].value, "Upgrade"); assert_eq!(req.headers[3].name, "Sec-WebSocket-Key"); assert_eq!(req.headers[3].value, "dGhlIHNhbXBsZSBub25jZQ=="); assert_eq!(req.headers[4].name, "Sec-WebSocket-Version"); assert_eq!(req.headers[4].value, "13"); } #[test] fn http_response() { let res = Response { protocol: "HTTP/1.1".to_string(), code: 200, message: "OK".to_string(), headers: vec![Header { name: "Content-Length".to_string(), value: "5".to_string(), }], content: b"hello".to_vec(), }; let out: String = res.into(); assert_eq!( out, "HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello".to_string() ); } }
.then(single(' '))
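The combinator chain in request_parser() reads the request line, then headers until a blank line, then exactly Content-Length bytes of body. A plain-Python sketch of that same flow, simplified by assuming the whole request is already buffered in `data`:

def parse_request(data: bytes):
    # Split head (request line + headers) from the body at the blank line.
    head, _, rest = data.partition(b"\r\n\r\n")
    lines = head.decode("latin-1").split("\r\n")
    method, path, protocol = lines[0].split(" ", 2)
    headers = dict(line.split(": ", 1) for line in lines[1:])
    length = int(headers.get("Content-Length", 0))
    return method, path, protocol, headers, rest[:length]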
plugin_tests.py
# write your first unittest!
class TestPlugin(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.skill_id = "ovos-skill-timer.OpenVoiceOS"

    def test_find_plugin(self):
        plugins = find_skill_plugins()
        self.assertIn(self.skill_id, list(plugins))
import unittest from ovos_plugin_manager.skills import find_skill_plugins
__init__.py
# Copyright (C) 2016 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> from datetime import datetime from logging import getLogger import collections from sqlalchemy.sql.expression import tuple_ from ggrc import db from ggrc import models from ggrc.automapper.rules import rules from ggrc.login import get_current_user from ggrc.models.audit import Audit from ggrc.models.relationship import Relationship from ggrc.models.request import Request from ggrc.rbac.permissions import is_allowed_update from ggrc.services.common import Resource, get_cache from ggrc.utils import benchmark, with_nop # pylint: disable=invalid-name logger = getLogger(__name__) class Stub(collections.namedtuple("Stub", ["type", "id"])): @classmethod def from_source(cls, relationship): return Stub(relationship.source_type, relationship.source_id) @classmethod def from_destination(cls, relationship): return Stub(relationship.destination_type, relationship.destination_id) class AutomapperGenerator(object): def __init__(self, use_benchmark=True): self.processed = set() self.queue = set() self.cache = collections.defaultdict(set) self.instance_cache = {} self.auto_mappings = set() if use_benchmark: self.benchmark = benchmark else: self.benchmark = with_nop def related(self, obj): if obj in self.cache: return self.cache[obj] # Pre-fetch neighborhood for enqueued object since we're gonna need that # results in a few steps. This drastically reduces number of queries. stubs = {s for rel in self.queue for s in rel} stubs.add(obj) # Union is here to convince mysql to use two separate indices and # merge te results. Just using `or` results in a full-table scan # Manual column list avoids loading the full object which would also try to # load related objects cols = db.session.query( Relationship.source_type, Relationship.source_id, Relationship.destination_type, Relationship.destination_id) relationships = cols.filter( tuple_(Relationship.source_type, Relationship.source_id).in_( [(s.type, s.id) for s in stubs] ) ).union_all( cols.filter( tuple_(Relationship.destination_type, Relationship.destination_id).in_( [(s.type, s.id) for s in stubs])) ).all() batch_requests = collections.defaultdict(set) for (src_type, src_id, dst_type, dst_id) in relationships: src = Stub(src_type, src_id) dst = Stub(dst_type, dst_id) # only store a neighbor if we queried for it since this way we know # we'll be storing complete neighborhood by the end of the loop batch_requests[src_type].add(src_id) batch_requests[dst_type].add(dst_id) if src in stubs: self.cache[src].add(dst) if dst in stubs: self.cache[dst].add(src) for type_, ids in batch_requests.iteritems(): model = getattr(models.all_models, type_) instances = model.query.filter(model.id.in_(ids)) for instance in instances: self.instance_cache[Stub(type_, instance.id)] = instance return self.cache[obj] def relate(self, src, dst): if src < dst: return (src, dst) else: return (dst, src) def generate_automappings(self, relationship): self.auto_mappings = set() with self.benchmark("Automapping generate_automappings"): # initial relationship is special since it is already created and # processing it would abort the loop so we manually enqueue the # neighborhood src = Stub.from_source(relationship) dst = Stub.from_destination(relationship) self._step(src, dst) self._step(dst, src) count = 0 while len(self.queue) > 0: if len(self.auto_mappings) > rules.count_limit: break count += 1 src, dst = entry = self.queue.pop() if not (self._can_map_to(src, relationship) and 
self._can_map_to(dst, relationship)): continue created = self._ensure_relationship(src, dst) self.processed.add(entry) if not created: # If the edge already exists it means that auto mappings for it have # already been processed and it is safe to cut here. continue self._step(src, dst) self._step(dst, src) if len(self.auto_mappings) <= rules.count_limit: self._flush(relationship) else: relationship._json_extras = { 'automapping_limit_exceeded': True } def _can_map_to(self, obj, parent_relationship): return is_allowed_update(obj.type, obj.id, parent_relationship.context) def _flush(self, parent_relationship): if len(self.auto_mappings) == 0: return with self.benchmark("Automapping flush"): current_user = get_current_user() now = datetime.now() # We are doing an INSERT IGNORE INTO here to mitigate a race condition # that happens when multiple simultaneous requests create the same # automapping. If a relationship object fails our unique constraint # it means that the mapping was already created by another request # and we can safely ignore it. inserter = Relationship.__table__.insert().prefix_with("IGNORE") original = self.relate(Stub.from_source(parent_relationship), Stub.from_destination(parent_relationship)) db.session.execute(inserter.values([{ "id": None, "modified_by_id": current_user.id, "created_at": now, "updated_at": now, "source_id": src.id, "source_type": src.type, "destination_id": dst.id, "destination_type": dst.type, "context_id": None, "status": None, "automapping_id": parent_relationship.id} for src, dst in self.auto_mappings if (src, dst) != original])) # (src, dst) is sorted cache = get_cache(create=True) if cache: # Add inserted relationships into new objects collection of the cache, # so that they will be logged within event and appropriate revisions # will be created. 
cache.new.update( (relationship, relationship.log_json()) for relationship in Relationship.query.filter_by( automapping_id=parent_relationship.id, modified_by_id=current_user.id, created_at=now, updated_at=now, ) ) def _step(self, src, dst): explicit, implicit = rules[src.type, dst.type] self._step_explicit(src, dst, explicit) self._step_implicit(src, dst, implicit) def _step_explicit(self, src, dst, explicit): if len(explicit) != 0: src_related = (o for o in self.related(src) if o.type in explicit and o != dst) for r in src_related: entry = self.relate(r, dst) if entry not in self.processed: self.queue.add(entry) def _step_implicit(self, src, dst, implicit): if not hasattr(models.all_models, src.type): logger.warning('Automapping by attr: cannot find model %s', src.type) return instance = self.instance_cache.get(src) if instance is None: model = getattr(models.all_models, src.type) instance = model.query.filter(model.id == src.id).first() self.instance_cache[src] = instance if instance is None: logger.warning("Automapping by attr: cannot load model %s: %s", src.type, src.id) return for attr in implicit: if hasattr(instance, attr.name): values = getattr(instance, attr.name) if not isinstance(values, collections.Iterable): values = [values] for value in values: if value is not None: entry = self.relate(Stub(value.type, value.id), dst) if entry not in self.processed: self.queue.add(entry) else: logger.warning('Automapping by attr: %s is None', attr.name) else: logger.warning( 'Automapping by attr: object %s has no attribute %s', src, attr.name, ) def _ensure_relationship(self, src, dst): if dst in self.cache.get(src, []): return False if src in self.cache.get(dst, []): return False self.auto_mappings.add((src, dst)) if src in self.cache: self.cache[src].add(dst) if dst in self.cache: self.cache[dst].add(src) return True def handle_relationship_post(source, destination):
def generate_relationship_snapshots(obj): """Generate needed snapshots for a given relationship. If we post a relationship for a snapshotable object and an Audit, we will map that object to audits program, make a snapshot for it and map the snapshot to the Audit. NOTE: this function will be deprecated soon. Args: obj: Relationship object. """ from ggrc.snapshotter import rules as snapshot_rules parent = None child = None if "Audit" in obj.source_type: parent = obj.source child = obj.destination elif "Audit" in obj.destination_type: parent = obj.destination child = obj.source if parent and child.type in snapshot_rules.Types.all: db.session.add(models.Snapshot( parent=parent, child_id=child.id, child_type=child.type, update_revision="new", context=parent.context, modified_by=get_current_user() )) def register_automapping_listeners(): """Register event listeners for auto mapper.""" # pylint: disable=unused-variable,unused-argument @Resource.collection_posted.connect_via(Relationship) def handle_relationship_collection_post(sender, objects=None, **kwargs): """Handle bulk creation of relationships. This handler reuses auto mapper cache and is more efficient than handling one object at a time. Args: objects: list of relationship Models. """ automapper = AutomapperGenerator() for obj in objects: if obj is None: logger.warning("Automapping listener: no obj, no mappings created") return generate_relationship_snapshots(obj) automapper.generate_automappings(obj) @Resource.collection_posted.connect_via(Request) def handle_requests_collection_post(sender, objects=None, **kwargs): for obj in objects: handle_relationship_post(obj, obj.audit) @Resource.model_put.connect_via(Request) def handle_request(sender, obj=None, src=None, service=None): handle_relationship_post(obj, obj.audit) @Resource.collection_posted.connect_via(Audit) def handle_audits_collection_post(sender, objects=None, **kwargs): for obj in objects: handle_relationship_post(obj, obj.program)
"""Handle posting of special relationships. This function handles direct relationships that do not have a relationship object. A fake object is created with source and destination and auto mappings are then generated. Args: source: Source model of relationship destination: Destination model of relationship """ if source is None: logger.warning("Automapping request listener: " "no source, no mappings created") return if destination is None: logger.warning("Automapping request listener: " "no destination, no mappings created") return relationship = Relationship(source_type=source.type, source_id=source.id, destination_type=destination.type, destination_id=destination.id) AutomapperGenerator().generate_automappings(relationship)
handler.py
import json import os from botocore.exceptions import ClientError from config.settings import Settings from modules.entities.account import Account from modules.entities.instance import Instance from modules.exceptions import exception from modules.models.instances import Instances from modules.models.owners import Owners from modules.providers.aws.storage import Storage from modules.providers.aws.account import AwsAccountProvider as Provider from modules.providers.aws.notify import Dispatcher from modules.providers.github import repository import datetime as dt from datetime import datetime import modules.logs as logs import modules.response as Response def _build_instance_model(account_name, region, settings=None): if settings is None: try: settings = Settings() except Exception as error: logs.info( "Handler", "Error getting settings {}".format(error.args[0]) ) raise Exception("Error getting settings, {}".format(error.args[0])) account_map = settings.accounts.get(account_name) if not account_map.get("enabled", False): raise Exception("Error {} is set enabled: false".format(account_name)) account = Account(account_map, region) instances = Instances( Provider(account, settings), Dispatcher(settings.config.get("sns")) ) return (account, instances) def list_instances(event, context): if ( "queryStringParameters" not in event or event.get("queryStringParameters") is None ): return Response.error("Missing required parameters: account, region") account_name = event.get("queryStringParameters").get("account", None) region = event.get("queryStringParameters").get("region", None) owner = event.get("queryStringParameters").get("owner", None) logs.info( "Handler", "list_instances: {}, {}, {}".format(account_name, region, owner), ) filters = [] if owner: filters.append({"Name": "tag:Owner", "Values": [owner]}) try: _, instances = _build_instance_model(account_name, region) except Exception as error: logs.info( "Handler", "Error: list_instances setup {}".format(error.args[0]) ) return Response.error(error.args[0]) try: instances_list = instances.list(filters) except Exception as error: logs.info("Handler", "Error: list_instances {}".format(error.args[0])) return Response.error(error.args[0]) return Response.success(instances_list) def create(event, context): if "body" not in event or event.get("body") is None: return Response.error( "Missing required parameters: account, region, id" ) event = json.loads(event.get("body")) account_name = event.get("account", None) region = event.get("region", None) logs.info("Handler", "create: {}, {}".format(account_name, region)) try: account, instances = _build_instance_model(account_name, region) except Exception as error: logs.info("Handler", "Error: create setup {}".format(error.args[0])) return Response.error(error.args[0]) try: instances.create(Instance(event, account)) except Exception as error: return Response.error(error.args[0]) return Response.created( "perfecto - within 5 minutes your instance be ready available!" 
) def create_platform(event, context): if "body" not in event or event.get("body") is None: return Response.error( "Missing required parameters: account, region, id" ) event = json.loads(event.get("body")) account_name = event.get("account", None) region = event.get("region", None) logs.info("Handler", "create: {}, {}".format(account_name, region)) try: account, instances = _build_instance_model(account_name, region) except Exception as error: logs.info("Handler", "Error: create setup {}".format(error.args[0])) return Response.error(error.args[0]) try: instances.create(Instance(event, account), event.get("platform")) except Exception as error: return Response.error(error.args[0]) return Response.created( "cheerios - within 5 to 8 minutes your platform instance will be ready!" ) def
(event, context): if "body" not in event or event.get("body") is None: return Response.error( "Missing required parameters: account, region, id" ) event = json.loads(event.get("body")) account_name = event.get("account", None) region = event.get("region", None) logs.info("Handler", "update: {}, {}".format(account_name, region)) try: account, instances = _build_instance_model(account_name, region) except Exception as error: logs.info("Handler", "Error: update setup {}".format(error.args[0])) return Response.error(error.args[0]) try: entity = Instance(event, account) instances.update(entity) except Exception as error: logs.info("Handler", "Error: update {}".format(error.args[0])) return Response.error(error.args[0]) return Response.success("all right - instance attributes updated!") def start(event, context): if "body" not in event or event.get("body") is None: return Response.error( "Missing required parameters: account, region, id" ) event = json.loads(event.get("body")) account_name = event.get("account", None) region = event.get("region", None) instance_id = event.get("id", None) logs.info( "Handler", "start: {}, {}, {}".format(account_name, region, instance_id), ) try: _, instances = _build_instance_model(account_name, region) except Exception as error: logs.info("Handler", "Error: start setup {}".format(error.args[0])) return Response.error(error.args[0]) try: instances.start(instance_id) except exception.DnsRecordsTagsMissing as error: logs.info( "Handler", "DnsRecordsTagsMissing: Start instance {}".format(error.args[0]), ) return Response.success( "instance started, but ...{}".format(error.args[0]) ) except Exception as error: logs.info("Handler", "Error: Start instance {}".format(error.args[0])) return Response.error(error.args[0]) return Response.success("wohooo - instance started!") def stop(event, context): if "body" not in event or event.get("body") is None: return Response.error( "Missing required parameters: account, region, id" ) event = json.loads(event.get("body")) account_name = event.get("account", None) region = event.get("region", None) instance_id = event.get("id", None) logs.info( "Handler", "stop: {}, {}, {}".format(account_name, region, instance_id) ) try: _, instances = _build_instance_model(account_name, region) except Exception as error: logs.info("Handler", "Error: stop setup {}".format(error.args[0])) return Response.error(error.args[0]) try: instances.stop(instance_id) except Exception as error: logs.info("Handler", "Error: stop {}".format(error.args[0])) return Response.error(error.args[0]) return Response.success("geweldig - instance is stopping now!") def terminate(event, context): if "body" not in event or event.get("body") is None: return Response.error( "Missing required parameters: account, region, id" ) event = json.loads(event.get("body")) account_name = event.get("account", None) region = event.get("region", None) instance_id = event.get("id", None) logs.info( "Handler", "terminate: {}, {}, {}".format(account_name, region, instance_id), ) try: _, instances = _build_instance_model(account_name, region) except Exception as error: logs.info( "Handler", "Error: list_images setup {}".format(error.args[0]) ) return Response.error(error.args[0]) try: instances.terminate(instance_id) except Exception as error: logs.info("Handler", "Error: list_images {}".format(error.args[0])) return Response.error(error.args[0]) return Response.success("geweldig - killing the instance!") def info(event, context): storage = Storage(Settings().config) try: owners_list = 
storage.list_all_owners() except Exception as error: logs.info("Handler", "Error: list_all_owners {}".format(error.args[0])) return Response.error(error.args[0]) response = { "users": owners_list, "environments": ["test", "prod", "poc", "demo"], } return Response.success(response) def list_images(event, context): if ( "queryStringParameters" not in event or event.get("queryStringParameters") is None ): return Response.error("Missing required parameters: account, region") account_name = event.get("queryStringParameters").get("account", None) region = event.get("queryStringParameters").get("region", None) logs.info("Handler", "list_images: {}, {}".format(account_name, region)) try: _, instances = _build_instance_model(account_name, region) except Exception as error: logs.info( "Handler", "Error: list_images setup {}".format(error.args[0]) ) return Response.error(error.args[0]) try: images = instances.list_images() except Exception as error: logs.info("Handler", "Error: list_images {}".format(error.args[0])) return Response.error(error.args[0]) return Response.success(images) def list_securitygroups(event, context): if ( "queryStringParameters" not in event or event.get("queryStringParameters") is None ): return Response.error("Missing required parameters: account, region") account_name = event.get("queryStringParameters", {}).get("account", None) region = event.get("queryStringParameters", {}).get("region", None) logs.info( "Handler", "list_securitygroups: {}, {}".format(account_name, region) ) try: _, instances = _build_instance_model(account_name, region) except Exception as error: logs.info( "Handler", "Error: list_securitygroups setup {}".format(error.args[0]), ) return Response.error(error.args[0]) try: securitygroups = instances.list_security_groups() except Exception as error: logs.info( "Handler", "Error: list_securitygroups {}".format(error.args[0]) ) return Response.error(error.args[0]) return Response.success(securitygroups) def list_accounts(event, context): settings = Settings() astngs = [] for k, v in settings.accounts.items(): if v.get("enabled", False): astngs.append(v) return Response.success(astngs) def stop_tagged_instances(event, context): current_hour = dt.datetime.today().hour try: settings = Settings() except Exception as error: logs.info("Handler", "Error getting settings {}".format(error.args[0])) return Response.error(error.args[0]) for account_name in settings.accounts: account_config = settings.accounts.get(account_name) if not account_config.get("enabled", False): logs.info( "Handler", "Info: stop_tagged_instances account {} not enabled, skipping".format( account_name ), ) continue for region in account_config.get("regions"): try: _, instances = _build_instance_model( account_name, region, settings ) except Exception as error: logs.info( "Handler", "Error: stop_tagged_instances setup {}".format( error.args[0] ), ) continue pass try: instances.stop_tagged_instances( [{"Name": "tag:StopTime", "Values": [str(current_hour)]}] ) except Exception as error: logs.info( "Handler", "Error: stop_tagged_instances {}".format(error.args[0]), ) continue pass return Response.success("ok") def start_tagged_instances(event, context): current_hour = dt.datetime.today().hour try: settings = Settings() except Exception as error: logs.info("Handler", "Error getting settings {}".format(error.args[0])) return Response.error(error.args[0]) for account_name in settings.accounts: logs.info( "Handler", "Info: start_tagged_instances account {}".format(account_name), ) account_config = 
settings.accounts.get(account_name) if not account_config.get("enabled", False): logs.info( "Handler", "Info: start_tagged_instances account {} not enabled, skipping".format( account_name ), ) continue for region in account_config.get("regions"): logs.info( "Handler", "Info: start_tagged_instances region {}".format(region), ) try: _, instances = _build_instance_model( account_name, region, settings ) except Exception as error: logs.info( "Handler", "Error: start_tagged_instances setup {}".format( error.args[0] ), ) continue pass try: instances.start_tagged_instances( [{"Name": "tag:StartTime", "Values": [str(current_hour)]}] ) except Exception as error: logs.info( "Handler", "Error: start_tagged_instances {}".format(error.args[0]), ) continue pass return Response.success("ok") def terminate_tagged_instances(event, context): current_date = datetime.now().strftime("%d/%m/%Y") try: settings = Settings() except Exception as error: logs.info("Handler", "Error getting settings {}".format(error.args[0])) return Response.error(error.args[0]) for account_name in settings.accounts: account_config = settings.accounts.get(account_name) if not account_config.get("enabled", False): logs.info( "Handler", "Info: terminate_tagged_instances account {} not enabled, skipping".format( account_name ), ) continue for region in account_config.get("regions"): try: _, instances = _build_instance_model( account_name, region, settings ) except Exception as error: logs.info( "Handler", "Error: terminate_tagged_instances setup {}".format( error.args[0] ), ) continue pass try: instances.terminate_tagged_instances( [ { "Name": "tag:TerminateDate", "Values": [str(current_date)], }, ] ) except Exception as error: logs.info( "Handler", "Error: terminate_tagged_instances {}".format( error.args[0] ), ) continue pass return Response.success("ok") def create_image(event, context): if "body" not in event or event.get("body") is None: return Response.error( "Missing required parameters: account, region, id" ) event = json.loads(event.get("body")) account_name = event.get("account", None) region = event.get("region", None) instance_id = event.get("id", None) image_name = event.get("image_name", None) logs.info( "Handler", "start: {}, {}, {}".format(account_name, region, instance_id), ) try: _, instances = _build_instance_model(account_name, region) except Exception as error: logs.info("Handler", "Error: start setup {}".format(error.args[0])) return Response.error(error.args[0]) try: instances.create_image(instance_id, image_name) except Exception as error: logs.info("Handler", "Error: Start Instance {}".format(error.args[0])) return Response.error(error.args[0]) return Response.success("ja precies - creating image!") def sync_users_owners(event, context): settings = Settings() owners = Owners( repository.Users(settings.config.get("github")), Storage(settings.config), ) owners.sync() def instance_ready(event, context): if "body" not in event or event.get("body") is None: return Response.error( "Missing required parameters: account, region, id" ) event = json.loads(event.get("body")) account_name = event.get("account", None) region = event.get("region", None) instance_id = event.get("id", None) logs.info( "Handler", "start: {}, {}, {}".format(account_name, region, instance_id), ) try: _, instances = _build_instance_model(account_name, region) except Exception as error: logs.info("Handler", "Error: start setup {}".format(error.args[0])) return Response.error(error.args[0]) try: instances.set_ready_state(instance_id) except Exception 
as error: logs.info("Handler", "Error: set_ready_state {}".format(error.args[0])) return Response.error(error.args[0]) return Response.success("wohooo - ready state set!") if __name__ == "__main__": sync_users_owners(None, None)
update
sign.go
package bitcoin import ( "bytes" "crypto/sha256" "encoding/hex" "errors" "fmt" "time" "github.com/OpenBazaar/spvwallet" wi "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" btc "github.com/btcsuite/btcutil" "github.com/btcsuite/btcutil/coinset" hd "github.com/btcsuite/btcutil/hdkeychain" "github.com/btcsuite/btcutil/txsort" "github.com/btcsuite/btcwallet/wallet/txauthor" "github.com/btcsuite/btcwallet/wallet/txrules" "github.com/OpenBazaar/multiwallet/util" ) func (w *BitcoinWallet) buildTx(amount int64, addr btc.Address, feeLevel wi.FeeLevel, optionalOutput *wire.TxOut) (*wire.MsgTx, error) { // Check for dust script, _ := txscript.PayToAddrScript(addr) if txrules.IsDustAmount(btc.Amount(amount), len(script), txrules.DefaultRelayFeePerKb) { return nil, wi.ErrorDustAmount } var additionalPrevScripts map[wire.OutPoint][]byte var additionalKeysByAddress map[string]*btc.WIF // Create input source height, _ := w.ws.ChainTip() utxos, err := w.db.Utxos().GetAll() if err != nil { return nil, err } coinMap := util.GatherCoins(height, utxos, w.ScriptToAddress, w.km.GetKeyForScript) coins := make([]coinset.Coin, 0, len(coinMap)) for k := range coinMap { coins = append(coins, k) } inputSource := func(target btc.Amount) (total btc.Amount, inputs []*wire.TxIn, inputValues []btc.Amount, scripts [][]byte, err error) { coinSelector := coinset.MaxValueAgeCoinSelector{MaxInputs: 10000, MinChangeAmount: btc.Amount(0)} coins, err := coinSelector.CoinSelect(target, coins) if err != nil { return total, inputs, inputValues, scripts, wi.ErrorInsuffientFunds } additionalPrevScripts = make(map[wire.OutPoint][]byte) additionalKeysByAddress = make(map[string]*btc.WIF) for _, c := range coins.Coins() { total += c.Value() outpoint := wire.NewOutPoint(c.Hash(), c.Index()) in := wire.NewTxIn(outpoint, []byte{}, [][]byte{}) in.Sequence = 0 // Opt-in RBF so we can bump fees inputs = append(inputs, in) additionalPrevScripts[*outpoint] = c.PkScript() key := coinMap[c] addr, err := key.Address(w.params) if err != nil { continue } privKey, err := key.ECPrivKey() if err != nil { continue } wif, _ := btc.NewWIF(privKey, w.params, true) additionalKeysByAddress[addr.EncodeAddress()] = wif } return total, inputs, inputValues, scripts, nil } // Get the fee per kilobyte feePerKB := int64(w.GetFeePerByte(feeLevel)) * 1000 // outputs out := wire.NewTxOut(amount, script) // Create change source changeSource := func() ([]byte, error) { addr := w.CurrentAddress(wi.INTERNAL) script, err := txscript.PayToAddrScript(addr) if err != nil { return []byte{}, err } return script, nil } outputs := []*wire.TxOut{out} if optionalOutput != nil { outputs = append(outputs, optionalOutput) } authoredTx, err := newUnsignedTransaction(outputs, btc.Amount(feePerKB), inputSource, changeSource) if err != nil { return nil, err } // BIP 69 sorting txsort.InPlaceSort(authoredTx.Tx) // Sign tx getKey := txscript.KeyClosure(func(addr btc.Address) (*btcec.PrivateKey, bool, error) { addrStr := addr.EncodeAddress() wif := additionalKeysByAddress[addrStr] return wif.PrivKey, wif.CompressPubKey, nil }) getScript := txscript.ScriptClosure(func( addr btc.Address) ([]byte, error) { return []byte{}, nil }) for i, txIn := range authoredTx.Tx.TxIn { prevOutScript := additionalPrevScripts[txIn.PreviousOutPoint] script, err := txscript.SignTxOutput(w.params, authoredTx.Tx, i, prevOutScript, 
txscript.SigHashAll, getKey, getScript, txIn.SignatureScript) if err != nil { return nil, errors.New("Failed to sign transaction") } txIn.SignatureScript = script } return authoredTx.Tx, nil } func
(outputs []*wire.TxOut, feePerKb btc.Amount, fetchInputs txauthor.InputSource, fetchChange txauthor.ChangeSource) (*txauthor.AuthoredTx, error) { var targetAmount btc.Amount for _, txOut := range outputs { targetAmount += btc.Amount(txOut.Value) } estimatedSize := EstimateSerializeSize(1, outputs, true, P2PKH) targetFee := txrules.FeeForSerializeSize(feePerKb, estimatedSize) for { inputAmount, inputs, _, scripts, err := fetchInputs(targetAmount + targetFee) if err != nil { return nil, err } if inputAmount < targetAmount+targetFee { return nil, errors.New("insufficient funds available to construct transaction") } maxSignedSize := EstimateSerializeSize(len(inputs), outputs, true, P2PKH) maxRequiredFee := txrules.FeeForSerializeSize(feePerKb, maxSignedSize) remainingAmount := inputAmount - targetAmount if remainingAmount < maxRequiredFee { targetFee = maxRequiredFee continue } unsignedTransaction := &wire.MsgTx{ Version: wire.TxVersion, TxIn: inputs, TxOut: outputs, LockTime: 0, } changeIndex := -1 changeAmount := inputAmount - targetAmount - maxRequiredFee if changeAmount != 0 && !txrules.IsDustAmount(changeAmount, P2PKHOutputSize, txrules.DefaultRelayFeePerKb) { changeScript, err := fetchChange() if err != nil { return nil, err } if len(changeScript) > P2PKHPkScriptSize { return nil, errors.New("fee estimation requires change " + "scripts no larger than P2PKH output scripts") } change := wire.NewTxOut(int64(changeAmount), changeScript) l := len(outputs) unsignedTransaction.TxOut = append(outputs[:l:l], change) changeIndex = l } return &txauthor.AuthoredTx{ Tx: unsignedTransaction, PrevScripts: scripts, TotalInput: inputAmount, ChangeIndex: changeIndex, }, nil } } func (w *BitcoinWallet) bumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { txn, err := w.db.Txns().Get(txid) if err != nil { return nil, err } if txn.Height > 0 { return nil, spvwallet.BumpFeeAlreadyConfirmedError } if txn.Height < 0 { return nil, spvwallet.BumpFeeTransactionDeadError } // Check utxos for CPFP utxos, _ := w.db.Utxos().GetAll() for _, u := range utxos { if u.Op.Hash.IsEqual(&txid) && u.AtHeight == 0 { addr, err := w.ScriptToAddress(u.ScriptPubkey) if err != nil { return nil, err } key, err := w.km.GetKeyForScript(addr.ScriptAddress()) if err != nil { return nil, err } h, err := hex.DecodeString(u.Op.Hash.String()) if err != nil { return nil, err } in := wi.TransactionInput{ LinkedAddress: addr, OutpointIndex: u.Op.Index, OutpointHash: h, Value: int64(u.Value), } transactionID, err := w.sweepAddress([]wi.TransactionInput{in}, nil, key, nil, wi.FEE_BUMP) if err != nil { return nil, err } return transactionID, nil } } return nil, spvwallet.BumpFeeNotFoundError } func (w *BitcoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (*chainhash.Hash, error) { var internalAddr btc.Address if address != nil { internalAddr = *address } else { internalAddr = w.CurrentAddress(wi.INTERNAL) } script, err := txscript.PayToAddrScript(internalAddr) if err != nil { return nil, err } var val int64 var inputs []*wire.TxIn additionalPrevScripts := make(map[wire.OutPoint][]byte) for _, in := range ins { val += in.Value ch, err := chainhash.NewHashFromStr(hex.EncodeToString(in.OutpointHash)) if err != nil { return nil, err } script, err := txscript.PayToAddrScript(in.LinkedAddress) if err != nil { return nil, err } outpoint := wire.NewOutPoint(ch, in.OutpointIndex) input := wire.NewTxIn(outpoint, []byte{}, [][]byte{}) inputs = append(inputs, input) 
additionalPrevScripts[*outpoint] = script } out := wire.NewTxOut(val, script) txType := P2PKH if redeemScript != nil { txType = P2SH_1of2_Multisig _, err := spvwallet.LockTimeFromRedeemScript(*redeemScript) if err == nil { txType = P2SH_Multisig_Timelock_1Sig } } estimatedSize := EstimateSerializeSize(len(ins), []*wire.TxOut{out}, false, txType) // Calculate the fee feePerByte := int(w.GetFeePerByte(feeLevel)) fee := estimatedSize * feePerByte outVal := val - int64(fee) if outVal < 0 { outVal = 0 } out.Value = outVal tx := &wire.MsgTx{ Version: wire.TxVersion, TxIn: inputs, TxOut: []*wire.TxOut{out}, LockTime: 0, } // BIP 69 sorting txsort.InPlaceSort(tx) // Sign tx privKey, err := key.ECPrivKey() if err != nil { return nil, err } pk := privKey.PubKey().SerializeCompressed() addressPub, err := btc.NewAddressPubKey(pk, w.params) getKey := txscript.KeyClosure(func(addr btc.Address) (*btcec.PrivateKey, bool, error) { if addressPub.EncodeAddress() == addr.EncodeAddress() { wif, err := btc.NewWIF(privKey, w.params, true) if err != nil { return nil, false, err } return wif.PrivKey, wif.CompressPubKey, nil } return nil, false, errors.New("Not found") }) getScript := txscript.ScriptClosure(func(addr btc.Address) ([]byte, error) { if redeemScript == nil { return []byte{}, nil } return *redeemScript, nil }) // Check if time locked var timeLocked bool if redeemScript != nil { rs := *redeemScript if rs[0] == txscript.OP_IF { timeLocked = true tx.Version = 2 for _, txIn := range tx.TxIn { locktime, err := spvwallet.LockTimeFromRedeemScript(*redeemScript) if err != nil { return nil, err } txIn.Sequence = locktime } } } hashes := txscript.NewTxSigHashes(tx) for i, txIn := range tx.TxIn { if redeemScript == nil { prevOutScript := additionalPrevScripts[txIn.PreviousOutPoint] script, err := txscript.SignTxOutput(w.params, tx, i, prevOutScript, txscript.SigHashAll, getKey, getScript, txIn.SignatureScript) if err != nil { return nil, errors.New("Failed to sign transaction") } txIn.SignatureScript = script } else { sig, err := txscript.RawTxInWitnessSignature(tx, hashes, i, ins[i].Value, *redeemScript, txscript.SigHashAll, privKey) if err != nil { return nil, err } var witness wire.TxWitness if timeLocked { witness = wire.TxWitness{sig, []byte{}} } else { witness = wire.TxWitness{[]byte{}, sig} } witness = append(witness, *redeemScript) txIn.Witness = witness } } // broadcast if err := w.Broadcast(tx); err != nil { return nil, err } txid := tx.TxHash() return &txid, nil } func (w *BitcoinWallet) createMultisigSignature(ins []wi.TransactionInput, outs []wi.TransactionOutput, key *hd.ExtendedKey, redeemScript []byte, feePerByte uint64) ([]wi.Signature, error) { var sigs []wi.Signature tx := wire.NewMsgTx(1) for _, in := range ins { ch, err := chainhash.NewHashFromStr(hex.EncodeToString(in.OutpointHash)) if err != nil { return sigs, err } outpoint := wire.NewOutPoint(ch, in.OutpointIndex) input := wire.NewTxIn(outpoint, []byte{}, [][]byte{}) tx.TxIn = append(tx.TxIn, input) } for _, out := range outs { scriptPubKey, err := txscript.PayToAddrScript(out.Address) if err != nil { return sigs, err } output := wire.NewTxOut(out.Value, scriptPubKey) tx.TxOut = append(tx.TxOut, output) } // Subtract fee txType := P2SH_2of3_Multisig _, err := spvwallet.LockTimeFromRedeemScript(redeemScript) if err == nil { txType = P2SH_Multisig_Timelock_2Sigs } estimatedSize := EstimateSerializeSize(len(ins), tx.TxOut, false, txType) fee := estimatedSize * int(feePerByte) if len(tx.TxOut) > 0 { feePerOutput := fee / len(tx.TxOut) for _, 
output := range tx.TxOut { output.Value -= int64(feePerOutput) } } // BIP 69 sorting txsort.InPlaceSort(tx) signingKey, err := key.ECPrivKey() if err != nil { return sigs, err } hashes := txscript.NewTxSigHashes(tx) for i := range tx.TxIn { sig, err := txscript.RawTxInWitnessSignature(tx, hashes, i, ins[i].Value, redeemScript, txscript.SigHashAll, signingKey) if err != nil { continue } bs := wi.Signature{InputIndex: uint32(i), Signature: sig} sigs = append(sigs, bs) } return sigs, nil } func (w *BitcoinWallet) multisign(ins []wi.TransactionInput, outs []wi.TransactionOutput, sigs1 []wi.Signature, sigs2 []wi.Signature, redeemScript []byte, feePerByte uint64, broadcast bool) ([]byte, error) { tx := wire.NewMsgTx(1) for _, in := range ins { ch, err := chainhash.NewHashFromStr(hex.EncodeToString(in.OutpointHash)) if err != nil { return nil, err } outpoint := wire.NewOutPoint(ch, in.OutpointIndex) input := wire.NewTxIn(outpoint, []byte{}, [][]byte{}) tx.TxIn = append(tx.TxIn, input) } for _, out := range outs { scriptPubKey, err := txscript.PayToAddrScript(out.Address) if err != nil { return nil, err } output := wire.NewTxOut(out.Value, scriptPubKey) tx.TxOut = append(tx.TxOut, output) } // Subtract fee txType := P2SH_2of3_Multisig _, err := spvwallet.LockTimeFromRedeemScript(redeemScript) if err == nil { txType = P2SH_Multisig_Timelock_2Sigs } estimatedSize := EstimateSerializeSize(len(ins), tx.TxOut, false, txType) fee := estimatedSize * int(feePerByte) if len(tx.TxOut) > 0 { feePerOutput := fee / len(tx.TxOut) for _, output := range tx.TxOut { output.Value -= int64(feePerOutput) } } // BIP 69 sorting txsort.InPlaceSort(tx) // Check if time locked var timeLocked bool if redeemScript[0] == txscript.OP_IF { timeLocked = true } for i, input := range tx.TxIn { var sig1 []byte var sig2 []byte for _, sig := range sigs1 { if int(sig.InputIndex) == i { sig1 = sig.Signature break } } for _, sig := range sigs2 { if int(sig.InputIndex) == i { sig2 = sig.Signature break } } witness := wire.TxWitness{[]byte{}, sig1, sig2} if timeLocked { witness = append(witness, []byte{0x01}) } witness = append(witness, redeemScript) input.Witness = witness } // broadcast if broadcast { if err := w.Broadcast(tx); err != nil { return nil, err } } var buf bytes.Buffer tx.BtcEncode(&buf, wire.ProtocolVersion, wire.WitnessEncoding) return buf.Bytes(), nil } func (w *BitcoinWallet) generateMultisigScript(keys []hd.ExtendedKey, threshold int, timeout time.Duration, timeoutKey *hd.ExtendedKey) (addr btc.Address, redeemScript []byte, err error) { if uint32(timeout.Hours()) > 0 && timeoutKey == nil { return nil, nil, errors.New("Timeout key must be non nil when using an escrow timeout") } if len(keys) < threshold { return nil, nil, fmt.Errorf("unable to generate multisig script with "+ "%d required signatures when there are only %d public "+ "keys available", threshold, len(keys)) } var ecKeys []*btcec.PublicKey for _, key := range keys { ecKey, err := key.ECPubKey() if err != nil { return nil, nil, err } ecKeys = append(ecKeys, ecKey) } builder := txscript.NewScriptBuilder() if uint32(timeout.Hours()) == 0 { builder.AddInt64(int64(threshold)) for _, key := range ecKeys { builder.AddData(key.SerializeCompressed()) } builder.AddInt64(int64(len(ecKeys))) builder.AddOp(txscript.OP_CHECKMULTISIG) } else { ecKey, err := timeoutKey.ECPubKey() if err != nil { return nil, nil, err } sequenceLock := blockchain.LockTimeToSequence(false, uint32(timeout.Hours()*6)) builder.AddOp(txscript.OP_IF) builder.AddInt64(int64(threshold)) for _, key 
:= range ecKeys { builder.AddData(key.SerializeCompressed()) } builder.AddInt64(int64(len(ecKeys))) builder.AddOp(txscript.OP_CHECKMULTISIG) builder.AddOp(txscript.OP_ELSE). AddInt64(int64(sequenceLock)). AddOp(txscript.OP_CHECKSEQUENCEVERIFY). AddOp(txscript.OP_DROP). AddData(ecKey.SerializeCompressed()). AddOp(txscript.OP_CHECKSIG). AddOp(txscript.OP_ENDIF) } redeemScript, err = builder.Script() if err != nil { return nil, nil, err } witnessProgram := sha256.Sum256(redeemScript) addr, err = btc.NewAddressWitnessScriptHash(witnessProgram[:], w.params) if err != nil { return nil, nil, err } return addr, redeemScript, nil } func (w *BitcoinWallet) estimateSpendFee(amount int64, feeLevel wi.FeeLevel) (uint64, error) { // Since this is an estimate we can use a dummy output address. Let's use a long one so we don't under estimate. addr, err := btc.DecodeAddress("bc1qxtq7ha2l5qg70atpwp3fus84fx3w0v2w4r2my7gt89ll3w0vnlgspu349h", w.params) if err != nil { return 0, err } tx, err := w.buildTx(amount, addr, feeLevel, nil) if err != nil { return 0, err } var outval int64 for _, output := range tx.TxOut { outval += output.Value } var inval int64 utxos, err := w.db.Utxos().GetAll() if err != nil { return 0, err } for _, input := range tx.TxIn { for _, utxo := range utxos { if utxo.Op.Hash.IsEqual(&input.PreviousOutPoint.Hash) && utxo.Op.Index == input.PreviousOutPoint.Index { inval += utxo.Value break } } } if inval < outval { return 0, errors.New("Error building transaction: inputs less than outputs") } return uint64(inval - outval), err }
newUnsignedTransaction
signal.go
// Package s6 allows Go programs to signal readiness to the s6[1] suite of system // supervision tools. This should be run in func main. // // [1]: http://skarnet.org/software/s6/index.html package s6 import ( "errors" "flag" "os" ) var ( ErrCantFindNotificationFD = errors.New("s6: can't find notification file descriptor") notificationFD = flag.Int("notification-fd", 0, "notification file descriptor") ) // Signal signals readiness to s6. // // See: http://skarnet.org/software/s6/notifywhenup.html func Signal() error {
var err error // If this is unset, we probably don't care about notifying s6. if *notificationFD == 0 { return nil } fout := os.NewFile(uintptr(*notificationFD), "s6-notification") if fout == nil { return ErrCantFindNotificationFD } defer fout.Close() _, err = fout.Write([]byte("\n")) return err }
api_op_UpdateFlowOutput.go
// Code generated by smithy-go-codegen DO NOT EDIT. package mediaconnect import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/mediaconnect/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Updates an existing flow output. func (c *Client) UpdateFlowOutput(ctx context.Context, params *UpdateFlowOutputInput, optFns ...func(*Options)) (*UpdateFlowOutputOutput, error) { if params == nil { params = &UpdateFlowOutputInput{} } result, metadata, err := c.invokeOperation(ctx, "UpdateFlowOutput", params, optFns, c.addOperationUpdateFlowOutputMiddlewares) if err != nil { return nil, err } out := result.(*UpdateFlowOutputOutput) out.ResultMetadata = metadata return out, nil } // The fields that you want to update in the output. type UpdateFlowOutputInput struct { // The flow that is associated with the output that you want to update. // // This member is required. FlowArn *string // The ARN of the output that you want to update. // // This member is required. OutputArn *string // The range of IP addresses that should be allowed to initiate output requests to // this flow. These IP addresses should be in the form of a Classless Inter-Domain // Routing (CIDR) block; for example, 10.0.0.0/16. CidrAllowList []string // A description of the output. This description appears only on the AWS Elemental // MediaConnect console and will not be seen by the end user. Description *string // The IP address where you want to send the output. Destination *string // The type of key used for the encryption. If no keyType is provided, the service // will use the default setting (static-key). Encryption *types.UpdateEncryption // The maximum latency in milliseconds for Zixi-based streams. MaxLatency int32 // The media streams that are associated with the output, and the parameters for // those associations. MediaStreamOutputConfigurations []types.MediaStreamOutputConfigurationRequest // The minimum latency in milliseconds for SRT-based streams. In streams that use // the SRT protocol, this value that you set on your MediaConnect source or output // represents the minimal potential latency of that connection. The latency of the // stream is set to the highest number between the sender’s minimum latency and the // receiver’s minimum latency. MinLatency int32 // The port to use when content is distributed to this output. Port int32 // The protocol to use for the output. Protocol types.Protocol // The remote ID for the Zixi-pull stream. RemoteId *string // The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams. SmoothingLatency int32 // The stream ID that you want to use for this transport. This parameter applies // only to Zixi-based streams. StreamId *string // The name of the VPC interface attachment to use for this output. VpcInterfaceAttachment *types.VpcInterfaceAttachment } type UpdateFlowOutputOutput struct { // The ARN of the flow that is associated with the updated output. FlowArn *string // The new settings of the output that you updated. Output *types.Output // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata } func (c *Client) addOperationUpdateFlowOutputMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestjson1_serializeOpUpdateFlowOutput{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestjson1_deserializeOpUpdateFlowOutput{}, middleware.After) if err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addOpUpdateFlowOutputValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateFlowOutput(options.Region), middleware.Before); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err
if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opUpdateFlowOutput(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "mediaconnect", OperationName: "UpdateFlowOutput", } }
}
extension.ts
import * as vscode from 'vscode'; const semver = require('semver'); const https = require('https'); const k8s = require('@kubernetes/client-node'); const path = require('path'); const os = require('os'); const LIBRARIES: { [platform: string]: [var_name: string, lib_name: string] } = { 'darwin': ['DYLD_INSERT_LIBRARIES', 'libmirrord_layer.dylib'], 'linux': ['LD_PRELOAD', 'libmirrord_layer.so'] }; const versionCheckEndpoint = 'https://version.mirrord.dev/get-latest-version'; let buttons: { toggle: vscode.StatusBarItem, settings: vscode.StatusBarItem }; let globalContext: vscode.ExtensionContext; let k8sApi: any; async function changeSettings() { let agentNamespace = globalContext.workspaceState.get<string>('agentNamespace', 'default'); let impersonatedPodNamespace = globalContext.workspaceState.get<string>('impersonatedPodNamespace', 'default'); const options = ['Change namespace for mirrord agent (current: ' + agentNamespace + ')', 'Change namespace for impersonated pod (current: ' + impersonatedPodNamespace + ')']; vscode.window.showQuickPick(options).then(async setting => { if (setting === undefined) { return; } if (setting.startsWith('Change namespace')) { let namespaces = await k8sApi.listNamespace(); let namespaceNames = namespaces.body.items.map((namespace: { metadata: { name: any; }; }) => { return namespace.metadata.name; }); vscode.window.showQuickPick(namespaceNames, { placeHolder: 'Select namespace' }).then(async namespaceName => { if (namespaceName === undefined) { return; } if (setting.startsWith('Change namespace for mirrord agent')) { globalContext.workspaceState.update('agentNamespace', namespaceName); } else if (setting.startsWith('Change namespace for impersonated pod')) { globalContext.workspaceState.update('impersonatedPodNamespace', namespaceName); } }); }
async function toggle(state: vscode.Memento, button: vscode.StatusBarItem) { if (state.get('enabled')) { // vscode.debug.registerDebugConfigurationProvider('*', new ConfigurationProvider(), 2); state.update('enabled', false); button.text = 'Enable mirrord'; } else { state.update('enabled', true); button.text = 'Disable mirrord'; } } async function checkVersion(version: string) { let versionUrl = versionCheckEndpoint + '?source=1&version=' + version; https.get(versionUrl, (res: any) => { res.on('data', (d: any) => { const config = vscode.workspace.getConfiguration(); if (config.get('mirrord.promptOutdated') !== false) { if (semver.lt(version, d.toString())) { vscode.window.showInformationMessage('Your version of mirrord is outdated, you should update.', 'Update', "Don't show again").then(item => { if (item === 'Update') { vscode.env.openExternal(vscode.Uri.parse('vscode:extension/MetalBear.mirrord')); } else if (item === "Don't show again") { config.update('mirrord.promptOutdated', false); } }); } } }); }).on('error', (e: any) => { console.error(e); }); } // this method is called when your extension is activated // your extension is activated the very first time the command is executed export async function activate(context: vscode.ExtensionContext) { // TODO: Download mirrord according to platform checkVersion(context.extension.packageJSON.version); globalContext = context; let k8sConfig = new k8s.KubeConfig(); k8sConfig.loadFromDefault(); k8sApi = k8sConfig.makeApiClient(k8s.CoreV1Api); context.globalState.update('enabled', false); vscode.debug.registerDebugConfigurationProvider('*', new ConfigurationProvider(), 2); buttons = { toggle: vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Left, 0), settings: vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Left, 0) }; const toggleCommandId = 'mirrord.toggleMirroring'; context.subscriptions.push(vscode.commands.registerCommand(toggleCommandId, async function () { toggle(context.globalState, buttons.toggle); })); buttons.toggle.text = 'Enable mirrord'; buttons.toggle.command = toggleCommandId; const settingsCommandId = 'mirrord.changeSettings'; context.subscriptions.push(vscode.commands.registerCommand(settingsCommandId, changeSettings)); buttons.settings.text = '$(gear)'; buttons.settings.command = settingsCommandId; for (const button of Object.values(buttons)) { context.subscriptions.push(button); button.show(); }; } class ConfigurationProvider implements vscode.DebugConfigurationProvider { async resolveDebugConfiguration(folder: vscode.WorkspaceFolder | undefined, config: vscode.DebugConfiguration, token: vscode.CancellationToken): Promise<vscode.DebugConfiguration | null | undefined> { if (!globalContext.globalState.get('enabled')) { return new Promise(resolve => { resolve(config); }); } if (config.__parentId) { // For some reason resolveDebugConfiguration runs twice for Node projects. __parentId is populated. 
return new Promise(resolve => { return resolve(config); }); } const namespace = globalContext.workspaceState.get<string>('namespace', 'default'); // Get pods from kubectl and let user select one to mirror let pods = await k8sApi.listNamespacedPod(namespace); let podNames = pods.body.items.map((pod: { metadata: { name: any; }; }) => { return pod.metadata.name; }); return await vscode.window.showQuickPick(podNames, { placeHolder: 'Select pod to mirror' }).then(async podName => { return new Promise(resolve => { console.log(config); const namespace = globalContext.workspaceState.get<string>('namespace', 'default'); // Get pods from kubectl and let user select one to mirror if (k8sApi === null) { return; } let libraryPath; if (globalContext.extensionMode === vscode.ExtensionMode.Development) { libraryPath = path.join(path.dirname(globalContext.extensionPath), "target", "debug"); } else { libraryPath = globalContext.extensionPath; } let [environmentVariableName, libraryName] = LIBRARIES[os.platform()]; config.env = { ...config.env, ...{ // eslint-disable-next-line @typescript-eslint/naming-convention 'MIRRORD_AGENT_IMPERSONATED_POD_NAME': podName } }; config.env[environmentVariableName] = path.join(libraryPath, libraryName); return resolve(config); }); }); } }
}); }
local.go
package desync import ( "context" "crypto/sha512" "errors" "fmt" "io/ioutil" "os" "path/filepath" "strings" "sync" "time" ) const chunkFileExt = ".cacnk" // LocalStore casync store type LocalStore struct { Base string // When accessing chunks, should mtime be updated? Useful when this is // a cache. Old chunks can be identified and removed from the store that way UpdateTimes bool } // NewLocalStore creates an instance of a local castore; it only checks for the presence // of the store func NewLocalStore(dir string) (LocalStore, error)
// GetChunk reads and returns one (compressed!) chunk from the store func (s LocalStore) GetChunk(id ChunkID) ([]byte, error) { sID := id.String() p := filepath.Join(s.Base, sID[0:4], sID) + chunkFileExt if _, err := os.Stat(p); err != nil { return nil, ChunkMissing{id} } if s.UpdateTimes { now := time.Now() if err := os.Chtimes(p, now, now); err != nil { return nil, err } } return ioutil.ReadFile(p) } // RemoveChunk deletes a chunk, typically an invalid one, from the filesystem. // Used when verifying and repairing caches. func (s LocalStore) RemoveChunk(id ChunkID) error { sID := id.String() p := filepath.Join(s.Base, sID[0:4], sID) + chunkFileExt if _, err := os.Stat(p); err != nil { return ChunkMissing{id} } return os.Remove(p) } // StoreChunk adds a new chunk to the store func (s LocalStore) StoreChunk(id ChunkID, b []byte) error { sID := id.String() d := filepath.Join(s.Base, sID[0:4]) if err := os.MkdirAll(d, 0755); err != nil { return err } tmpfile, err := ioutil.TempFile(d, ".tmp-cacnk") if err != nil { return err } tmpfile.Close() defer os.Remove(tmpfile.Name()) // in case we don't get to the rename, clean up if err = ioutil.WriteFile(tmpfile.Name(), b, 0644); err != nil { return err } p := filepath.Join(d, sID) + chunkFileExt return os.Rename(tmpfile.Name(), p) } // Verify all chunks in the store. If repair is set true, bad chunks are deleted. // n determines the number of concurrent operations. func (s LocalStore) Verify(ctx context.Context, n int, repair bool) error { var wg sync.WaitGroup ids := make(chan ChunkID) // Start the workers for i := 0; i < n; i++ { wg.Add(1) go func() { for id := range ids { err := s.verifyChunk(id) switch err.(type) { case ChunkInvalid: // bad chunk, report and delete (if repair=true) msg := err.Error() if repair { if err = s.RemoveChunk(id); err != nil { msg = msg + ":" + err.Error() } else { msg = msg + ": removed" } } fmt.Fprintln(os.Stderr, msg) case nil: // all good, move to the next default: // unexpected, print the error and carry on fmt.Fprintln(os.Stderr, err) } } wg.Done() }() } // Go trough all chunks underneath Base, filtering out other files, then feed // the IDs to the workers err := filepath.Walk(s.Base, func(path string, info os.FileInfo, err error) error { // See if we're meant to stop select { case <-ctx.Done(): return errors.New("interrupted") default: } if err != nil { // failed to walk? => fail return err } if info.IsDir() { // Skip dirs return nil } if !strings.HasSuffix(path, chunkFileExt) { // Skip files without chunk extension return nil } // Convert the name into a checksum, if that fails we're probably not looking // at a chunk file and should skip it. id, err := ChunkIDFromString(strings.TrimSuffix(filepath.Base(path), ".cacnk")) if err != nil { return nil } // Feed the workers ids <- id return nil }) close(ids) wg.Wait() return err } // Prune removes any chunks from the store that are not contained in a list // of chunks func (s LocalStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error { // Go trough all chunks underneath Base, filtering out other directories and files err := filepath.Walk(s.Base, func(path string, info os.FileInfo, err error) error { // See if we're meant to stop select { case <-ctx.Done(): return errors.New("interrupted") default: } if err != nil { // failed to walk? 
=> fail return err } if info.IsDir() { // Skip dirs return nil } if !strings.HasSuffix(path, chunkFileExt) { // Skip files without chunk extension return nil } // Convert the name into a checksum, if that fails we're probably not looking // at a chunk file and should skip it. id, err := ChunkIDFromString(strings.TrimSuffix(filepath.Base(path), ".cacnk")) if err != nil { return nil } // See if the chunk we're looking at is in the list we want to keep, if not // remove it. if _, ok := ids[id]; !ok { if err = s.RemoveChunk(id); err != nil { return err } } return nil }) return err } // Unpack a chunk, calculate the checksum of its content and return nil if // they match. func (s LocalStore) verifyChunk(id ChunkID) error { b, err := s.GetChunk(id) if err != nil { return err } // The chunk is compressed. Decompress it here db, err := Decompress(nil, b) if err != nil { return err } // Verify the checksum of the chunk matches the ID sum := sha512.Sum512_256(db) if sum != id { return ChunkInvalid{ID: id, Sum: sum} } return nil } // HasChunk returns true if the chunk is in the store func (s LocalStore) HasChunk(id ChunkID) bool { sID := id.String() p := filepath.Join(s.Base, sID[0:4], sID) + chunkFileExt if _, err := os.Stat(p); err == nil { return true } return false } func (s LocalStore) String() string { return s.Base }
{ info, err := os.Stat(dir) if err != nil { return LocalStore{}, err } if !info.IsDir() { return LocalStore{}, fmt.Errorf("%s is not a directory", dir) } return LocalStore{Base: dir}, nil }
address.js
import { dispatch as d3_dispatch } from 'd3-dispatch'; import { select as d3_select } from 'd3-selection'; import * as countryCoder from '@ideditor/country-coder'; import { geoExtent, geoChooseEdge, geoSphericalDistance } from '../../geo'; import { uiCombobox } from '../combobox'; import { utilArrayUniqBy, utilGetSetValue, utilNoAuto, utilRebind } from '../../util'; import { t } from '../../util/locale'; export function uiFieldAddress(field, context) { var dispatch = d3_dispatch('init', 'change'); var wrap = d3_select(null); var addrField = context.presets().field('address'); // needed for placeholder strings var _isInitialized = false; var _entityIDs = []; var _tags; var _countryCode; var _addressFormats = [{ format: [ ['housenumber', 'street'], ['city', 'postcode'] ] }]; context.data().get('address_formats') .then(function(d) { _addressFormats = d; }) .catch(function() { /* ignore */ }); function getNearStreets() { var extent = combinedEntityExtent(); var l = extent.center(); var box = geoExtent(l).padByMeters(200); var streets = context.intersects(box) .filter(isAddressable) .map(function(d) { var loc = context.projection([ (extent[0][0] + extent[1][0]) / 2, (extent[0][1] + extent[1][1]) / 2 ]); var choice = geoChooseEdge(context.childNodes(d), loc, context.projection); return { title: d.tags.name, value: d.tags.name, dist: choice.distance }; }) .sort(function(a, b) { return a.dist - b.dist; }); return utilArrayUniqBy(streets, 'value'); function isAddressable(d) { return d.tags.highway && d.tags.name && d.type === 'way'; } } function getNearCities() { var extent = combinedEntityExtent(); var l = extent.center(); var box = geoExtent(l).padByMeters(200); var cities = context.intersects(box) .filter(isAddressable) .map(function(d) { return { title: d.tags['addr:city'] || d.tags.name, value: d.tags['addr:city'] || d.tags.name, dist: geoSphericalDistance(d.extent(context.graph()).center(), l) }; }) .sort(function(a, b) { return a.dist - b.dist; }); return utilArrayUniqBy(cities, 'value'); function isAddressable(d) { if (d.tags.name) { if (d.tags.admin_level === '8' && d.tags.boundary === 'administrative') return true; if (d.tags.border_type === 'city') return true; if (d.tags.place === 'city' || d.tags.place === 'town' || d.tags.place === 'village') return true; } if (d.tags['addr:city']) return true; return false; } } function getNearValues(key) { var extent = combinedEntityExtent(); var l = extent.center(); var box = geoExtent(l).padByMeters(200); var results = context.intersects(box) .filter(function hasTag(d) { return _entityIDs.indexOf(d.id) === -1 && d.tags[key]; }) .map(function(d) { return { title: d.tags[key], value: d.tags[key], dist: geoSphericalDistance(d.extent(context.graph()).center(), l) }; }) .sort(function(a, b) { return a.dist - b.dist; }); return utilArrayUniqBy(results, 'value'); } function updateForCountryCode() { if (!_countryCode) return; var addressFormat; for (var i = 0; i < _addressFormats.length; i++) { var format = _addressFormats[i]; if (!format.countryCodes) { addressFormat = format; // choose the default format, keep going } else if (format.countryCodes.indexOf(_countryCode) !== -1) { addressFormat = format; // choose the country format, stop here break; } } var dropdowns = addressFormat.dropdowns || [ 'city', 'county', 'country', 'district', 'hamlet', 'neighbourhood', 'place', 'postcode', 'province', 'quarter', 'state', 'street', 'subdistrict', 'suburb' ]; var widths = addressFormat.widths || { housenumber: 1/3, street: 2/3, city: 2/3, state: 1/4, postcode: 
1/3 }; function row(r) { // Normalize widths. var total = r.reduce(function(sum, key) { return sum + (widths[key] || 0.5); }, 0); return r.map(function(key) { return { id: key, width: (widths[key] || 0.5) / total }; }); } wrap.selectAll('.addr-row') .data(addressFormat.format) .enter() .append('div') .attr('class', 'addr-row') .selectAll('input') .data(row) .enter() .append('input') .property('type', 'text') .call(updatePlaceholder) .attr('maxlength', context.maxCharsForTagValue()) .attr('class', function (d) { return 'addr-' + d.id; }) .call(utilNoAuto) .each(addDropdown) .style('width', function (d) { return d.width * 100 + '%'; }); function
(d) { if (dropdowns.indexOf(d.id) === -1) return; // not a dropdown var nearValues = (d.id === 'street') ? getNearStreets : (d.id === 'city') ? getNearCities : getNearValues; d3_select(this) .call(uiCombobox(context, 'address-' + d.id) .minItems(1) .caseSensitive(true) .fetcher(function(value, callback) { callback(nearValues('addr:' + d.id)); }) ); } wrap.selectAll('input') .on('blur', change()) .on('change', change()); wrap.selectAll('input:not(.combobox-input)') .on('input', change(true)); dispatch.call('init'); _isInitialized = true; } function address(selection) { _isInitialized = false; wrap = selection.selectAll('.form-field-input-wrap') .data([0]); wrap = wrap.enter() .append('div') .attr('class', 'form-field-input-wrap form-field-input-' + field.type) .merge(wrap); var extent = combinedEntityExtent(); if (extent) { var countryCode; if (context.inIntro()) { // localize the address format for the walkthrough countryCode = t('intro.graph.countrycode'); } else { var center = extent.center(); countryCode = countryCoder.iso1A2Code(center); } if (countryCode) { _countryCode = countryCode.toLowerCase(); updateForCountryCode(); } } } function change(onInput) { return function() { var tags = {}; wrap.selectAll('input') .each(function (subfield) { var key = field.key + ':' + subfield.id; // don't override multiple values with blank string if (Array.isArray(_tags[key]) && !this.value) return; tags[key] = this.value || undefined; }); dispatch.call('change', this, tags, onInput); }; } function updatePlaceholder(inputSelection) { return inputSelection.attr('placeholder', function(subfield) { if (_tags && Array.isArray(_tags[field.key + ':' + subfield.id])) { return t('inspector.multiple_values'); } if (_countryCode) { var localkey = subfield.id + '!' + _countryCode; var tkey = addrField.strings.placeholders[localkey] ? localkey : subfield.id; return addrField.t('placeholders.' + tkey); } }); } function updateTags(tags) { utilGetSetValue(wrap.selectAll('input'), function (subfield) { var val = tags[field.key + ':' + subfield.id]; return typeof val === 'string' ? val : ''; }) .attr('title', function(subfield) { var val = tags[field.key + ':' + subfield.id]; return val && Array.isArray(val) && val.filter(Boolean).join('\n'); }) .classed('mixed', function(subfield) { return Array.isArray(tags[field.key + ':' + subfield.id]); }) .call(updatePlaceholder); } function combinedEntityExtent() { return _entityIDs && _entityIDs.length && _entityIDs.reduce(function(extent, entityID) { var entity = context.graph().entity(entityID); return extent.extend(entity.extent(context.graph())); }, geoExtent()); } address.entityIDs = function(val) { if (!arguments.length) return _entityIDs; _entityIDs = val; return address; }; address.tags = function(tags) { _tags = tags; if (_isInitialized) { updateTags(tags); } else { dispatch.on('init', function () { dispatch.on('init', null); updateTags(tags); }); } }; address.focus = function() { var node = wrap.selectAll('input').node(); if (node) node.focus(); }; return utilRebind(address, dispatch, 'on'); }
addDropdown
helpers.ts
import { spawn as _spawn, type SpawnOptions } from "child_process"; /** Promise-based wrapper around child_process.spawn */ export async function
( command: string, args: string[] = [], options: SpawnOptions = {} ): Promise<number | null> { return new Promise<number | null>(function (resolve, reject) { const p = _spawn(command, args, { stdio: "inherit", ...options }); p.on("exit", resolve); p.on("error", reject); }); } /** Header component */ export function Header() { const title = "DPC Nordics CLI"; const line = Array(title.length).fill("─").join(""); console.clear(); console.log(title); console.log(line); }
spawn
helloWorld.js
// listen for Shoutem initialization complete document.addEventListener('shoutemready', onShoutemReady, false); // handler for Shoutem initialization finished function onShoutemReady(event) { // config object containing builder extension configuration, can be accessed via event // or by shoutem.sandbox.config const config = event.detail.config; // Waiting for DOM to be ready to initialize shoutem.api and call app start function $(document).ready(function() { shoutem.api.init(config.context); onPageReady(config); }); }; // Put your settings page logic here; it executes once the sandbox and DOM are initialized function onPageReady(config) { function errorHandler(err) { console.log('Something went wrong:', err); } function handleSubmit(e) { // prevent default action and bubbling e.preventDefault(); e.stopPropagation(); const greeting = $('#greeting').val(); // updates current shortcut settings by patching with current settings shoutem.api.shortcuts.updateSettings({ greeting: greeting }) .catch(errorHandler); return false; } function initForm(settings) { if(!settings) {
} $('button[type="submit"]').click(handleSubmit); // shoutem.api knows current shortcut and returns promise with fetched settings shoutem.api.shortcuts.getSettings() .then(initForm, errorHandler); }
return; } $('#greeting').val(settings.greeting);
go_nfq_test.go
// Copyright (C) 2015 Martin Garton <[email protected]> package nfq import ( "net" "testing" "time" ) func TestNfq(t *testing.T) { gotpacket := make(chan struct{}, 16) cb := func(date []byte) Verdict { gotpacket <- struct{}{} return NF_ACCEPT } nfq, err := NewDefaultQueue(0, cb) if err != nil { t.Fatal(err) } l, err := net.ListenPacket("udp", "127.0.0.1:9999") if err != nil { t.Fatal(err) } defer l.Close() addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:9999") if err != nil { t.Fatalf("ResolveUDPAddr failed: %v", err) } if _, err := l.WriteTo([]byte{1, 2, 3}, addr); err != nil { t.Fatal(err) } <-gotpacket select { case <-gotpacket: t.Fatal("didn't expect another packet") default: } nfq.Close() } func TestCloseWhenWritingLots(t *testing.T) { for i := 0; i < 100; i++ { testCloseWhenWritingLots(t) } } // This tries to trigger a race during close that's hopefully fixed now and // previously triggered a SIGALRM in the C code. func testCloseWhenWritingLots(t *testing.T) { cb := func(date []byte) Verdict { return NF_ACCEPT } nfq, err := NewDefaultQueue(0, cb) if err != nil { t.Fatal(err) } l, err := net.ListenPacket("udp", "127.0.0.1:9999") if err != nil { t.Fatal(err) } defer l.Close() addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:9999") if err != nil { t.Fatalf("ResolveUDPAddr failed: %v", err) } closing := make(chan chan struct{}) go func() { for { select { case closed := <-closing: close(closed) return default: if _, err := l.WriteTo([]byte{1, 2, 3}, addr); err != nil { t.Fatal(err) } }
nfq.Close() closed := make(chan struct{}) closing <- closed <-closed }
} }() time.Sleep(5 * time.Microsecond)
add-shop-payment-routing-rule-dialog.module.ts
import { CommonModule } from '@angular/common'; import { NgModule } from '@angular/core'; import { FlexLayoutModule } from '@angular/flex-layout'; import { ReactiveFormsModule } from '@angular/forms'; import { MatAutocompleteModule } from '@angular/material/autocomplete'; import { MatButtonModule } from '@angular/material/button'; import { MatDialogModule } from '@angular/material/dialog'; import { MatDividerModule } from '@angular/material/divider'; import { MatFormFieldModule } from '@angular/material/form-field'; import { MatIconModule } from '@angular/material/icon'; import { MatInputModule } from '@angular/material/input'; import { MatRadioModule } from '@angular/material/radio'; import { MatSelectModule } from '@angular/material/select'; import { AddShopPaymentRoutingRuleDialogComponent } from './add-shop-payment-routing-rule-dialog.component'; import { ExpanderComponent } from './expander'; import { PredicateComponent } from './predicate'; @NgModule({ imports: [ CommonModule, MatButtonModule, FlexLayoutModule, MatDialogModule, MatDividerModule, ReactiveFormsModule, MatFormFieldModule, MatInputModule, MatIconModule, MatSelectModule, MatRadioModule, MatAutocompleteModule, ], declarations: [AddShopPaymentRoutingRuleDialogComponent, PredicateComponent, ExpanderComponent], exports: [AddShopPaymentRoutingRuleDialogComponent], }) export class
{}
AddShopPaymentRoutingRuleDialogModule
embeds.py
import discord from discord.embeds import EmptyEmbed class CustomEmbeds: confirm_path = 'https://raw.githubusercontent.com/davisschenk/Unnamed-Bot/master/images/ConfirmIcon.png?token=AIRYAKQHGVMHHBQ73J7G2AK5FKHRK' add_path = 'https://raw.githubusercontent.com/davisschenk/Unnamed-Bot/master/images/AddIcon.png?token=AIRYAKXEWWR4CFXQTWXSAR25FKHI2' remove_path = 'https://raw.githubusercontent.com/davisschenk/Unnamed-Bot/master/images/MinusIcon.png?token=AIRYAKTRTDTZ4R54N5DN6EC5FKHTU' question_path = 'https://raw.githubusercontent.com/davisschenk/Unnamed-Bot/master/images/QuestionIcon.png?token=AIRYAKXDB5BW4TAKDGPRFI25FKHU6' info_path = 'https://raw.githubusercontent.com/davisschenk/Unnamed-Bot/master/images/InfoIcon.png?token=AIRYAKVHWJUI5UE22CS6QQC5FKHWI' @classmethod def confirm(cls, **kwargs): title = kwargs.get('title', EmptyEmbed) description = kwargs.get('description', EmptyEmbed) url = kwargs.get('url', EmptyEmbed) color = kwargs.get('color', discord.Color.from_rgb(46, 204, 113)) author = kwargs.get('author', 'Confirm') embed = discord.Embed(title=title, description=description, url=url, color=color) embed.set_author(name=author, icon_url=cls.confirm_path) return embed @classmethod def add(cls, **kwargs): title = kwargs.get('title', EmptyEmbed) description = kwargs.get('description', EmptyEmbed) url = kwargs.get('url', EmptyEmbed) color = kwargs.get('color', discord.Color.from_rgb(46, 204, 113)) author = kwargs.get('author', 'Add') embed = discord.Embed(title=title, description=description, url=url, color=color) embed.set_author(name=author, icon_url=cls.add_path) return embed @classmethod def remove(cls, **kwargs): title = kwargs.get('title', EmptyEmbed) description = kwargs.get('description', EmptyEmbed) url = kwargs.get('url', EmptyEmbed) color = kwargs.get('color', discord.Color.from_rgb(231, 76, 60)) author = kwargs.get('author', 'Remove') embed = discord.Embed(title=title, description=description, url=url, color=color) embed.set_author(name=author, icon_url=cls.remove_path) return embed @classmethod def question(cls, **kwargs): title = kwargs.get('title', EmptyEmbed) description = kwargs.get('description', EmptyEmbed) url = kwargs.get('url', EmptyEmbed) color = kwargs.get('color', discord.Color.from_rgb(52, 152, 219)) author = kwargs.get('author', 'Question') embed = discord.Embed(title=title, description=description, url=url, color=color) embed.set_author(name=author, icon_url=cls.question_path) return embed @classmethod def info(cls, **kwargs): title = kwargs.get('title', EmptyEmbed) description = kwargs.get('description', EmptyEmbed) url = kwargs.get('url', EmptyEmbed) color = kwargs.get('color', discord.Color.from_rgb(52, 152, 219)) author = kwargs.get('author', 'Info') embed = discord.Embed(title=title, description=description, url=url, color=color) embed.set_author(name=author, icon_url=cls.info_path) return embed @classmethod def starboard(cls, message, **kwargs): title = kwargs.get('title', EmptyEmbed) description = kwargs.get('description', f'{message.content}\n [Jump To](https://discordapp.com/channels/{message.guild.id}/{message.channel.id}/{message.id})') url = kwargs.get('url', EmptyEmbed) color = kwargs.get('color', 13103696) author = kwargs.get('author', ':star: Starboard :star:') embed = discord.Embed(color=color, description=description)
embed.set_footer(text=f"Author: {message.author}", icon_url=message.author.avatar_url) return embed
Title.tsx
export const Title: React.FC = ({ children }) => ( <section className="border-b bg-marp-brand text-white py-3">
& :global(a), & :global(a:hover), & :global(a:hover:active) { @apply no-underline text-current; } & :global(a:focus-visible) { @apply underline outline-none; } `}</style> </h1> </section> )
<h1 className="text-3xl font-bold text-center font-rounded uppercase"> {children} <style jsx>{`
long_mpc.py
import os import math import cereal.messaging as messaging from common.numpy_fast import clip, interp from selfdrive.swaglog import cloudlog from common.realtime import sec_since_boot from selfdrive.controls.lib.radar_helpers import _LEAD_ACCEL_TAU from selfdrive.controls.lib.longitudinal_mpc import libmpc_py from selfdrive.controls.lib.drive_helpers import MPC_COST_LONG LOG_MPC = os.environ.get('LOG_MPC', False) class LongitudinalMpc(): def
(self, mpc_id): self.mpc_id = mpc_id self.setup_mpc() self.v_mpc = 0.0 self.v_mpc_future = 0.0 self.a_mpc = 0.0 self.v_cruise = 0.0 self.prev_lead_status = False self.prev_lead_x = 0.0 self.new_lead = False self.last_cloudlog_t = 0.0 self.n_its = 0 self.duration = 0 # scc smoother self.cruise_gap = 0 def publish(self, pm): if LOG_MPC: qp_iterations = max(0, self.n_its) dat = messaging.new_message('liveLongitudinalMpc') dat.liveLongitudinalMpc.xEgo = list(self.mpc_solution[0].x_ego) dat.liveLongitudinalMpc.vEgo = list(self.mpc_solution[0].v_ego) dat.liveLongitudinalMpc.aEgo = list(self.mpc_solution[0].a_ego) dat.liveLongitudinalMpc.xLead = list(self.mpc_solution[0].x_l) dat.liveLongitudinalMpc.vLead = list(self.mpc_solution[0].v_l) dat.liveLongitudinalMpc.cost = self.mpc_solution[0].cost dat.liveLongitudinalMpc.aLeadTau = self.a_lead_tau dat.liveLongitudinalMpc.qpIterations = qp_iterations dat.liveLongitudinalMpc.mpcId = self.mpc_id dat.liveLongitudinalMpc.calculationTime = self.duration pm.send('liveLongitudinalMpc', dat) def setup_mpc(self): ffi, self.libmpc = libmpc_py.get_libmpc(self.mpc_id) self.libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE, MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK) self.mpc_solution = ffi.new("log_t *") self.cur_state = ffi.new("state_t *") self.cur_state[0].v_ego = 0 self.cur_state[0].a_ego = 0 self.a_lead_tau = _LEAD_ACCEL_TAU def set_cur_state(self, v, a): self.cur_state[0].v_ego = v self.cur_state[0].a_ego = a def update(self, CS, lead): v_ego = CS.vEgo # Setup current mpc state self.cur_state[0].x_ego = 0.0 if lead is not None and lead.status: x_lead = max(0, lead.dRel - 0.5) v_lead = max(0.0, lead.vLead) a_lead = lead.aLeadK if (v_lead < 0.1 or -a_lead / 2.0 > v_lead): v_lead = 0.0 a_lead = 0.0 self.a_lead_tau = max(lead.aLeadTau, (a_lead ** 2 * math.pi) / (2 * (v_lead + 0.01) ** 2)) self.new_lead = False if not self.prev_lead_status or abs(x_lead - self.prev_lead_x) > 2.5: self.libmpc.init_with_simulation(self.v_mpc, x_lead, v_lead, a_lead, self.a_lead_tau) self.new_lead = True self.prev_lead_status = True self.prev_lead_x = x_lead self.cur_state[0].x_l = x_lead self.cur_state[0].v_l = v_lead else: self.prev_lead_status = False # Fake a fast lead car, so mpc keeps running self.cur_state[0].x_l = 50.0 self.cur_state[0].v_l = v_ego + 10.0 a_lead = 0.0 self.a_lead_tau = _LEAD_ACCEL_TAU # Calculate mpc t = sec_since_boot() # scc smoother cruise_gap = int(clip(CS.cruiseGap, 1., 4.)) # TR = interp(float(cruise_gap), [1., 2., 3., 4.], [1.0, 1.3, 1.6, 2.0]) TR = interp(v_ego, [3., 30.], [1., 2.5]) if self.cruise_gap != cruise_gap: self.cruise_gap = cruise_gap self.n_its = self.libmpc.run_mpc(self.cur_state, self.mpc_solution, self.a_lead_tau, a_lead, TR) self.duration = int((sec_since_boot() - t) * 1e9) # Get solution. 
MPC timestep is 0.2 s, so interpolation to 0.05 s is needed self.v_mpc = self.mpc_solution[0].v_ego[1] self.a_mpc = self.mpc_solution[0].a_ego[1] self.v_mpc_future = self.mpc_solution[0].v_ego[10] # Reset if NaN or goes through lead car crashing = any(lead - ego < -50 for (lead, ego) in zip(self.mpc_solution[0].x_l, self.mpc_solution[0].x_ego)) nans = any(math.isnan(x) for x in self.mpc_solution[0].v_ego) backwards = min(self.mpc_solution[0].v_ego) < -0.01 if ((backwards or crashing) and self.prev_lead_status) or nans: if t > self.last_cloudlog_t + 5.0: self.last_cloudlog_t = t cloudlog.warning("Longitudinal mpc %d reset - backwards: %s crashing: %s nan: %s" % ( self.mpc_id, backwards, crashing, nans)) self.libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE, MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK) self.cur_state[0].v_ego = v_ego self.cur_state[0].a_ego = 0.0 self.v_mpc = v_ego self.a_mpc = CS.aEgo self.prev_lead_status = False
__init__
features.py
from django.db.models.aggregates import StdDev from django.db.utils import NotSupportedError, ProgrammingError from django.utils.functional import cached_property class BaseDatabaseFeatures: gis_enabled = False allows_group_by_pk = False allows_group_by_selected_pks = False empty_fetchmany_value = [] update_can_self_select = True # Does the backend distinguish between '' and None? interprets_empty_strings_as_nulls = False # Does the backend allow inserting duplicate NULL rows in a nullable # unique field? All core backends implement this correctly, but other # databases such as SQL Server do not. supports_nullable_unique_constraints = True # Does the backend allow inserting duplicate rows when a unique_together # constraint exists and some fields are nullable but not all of them? supports_partially_nullable_unique_constraints = True can_use_chunked_reads = True can_return_id_from_insert = False can_return_ids_from_bulk_insert = False has_bulk_insert = True uses_savepoints = False can_release_savepoints = False # If True, don't use integer foreign keys referring to, e.g., positive # integer primary keys. related_fields_match_type = False allow_sliced_subqueries_with_in = True has_select_for_update = False has_select_for_update_nowait = False has_select_for_update_skip_locked = False has_select_for_update_of = False # Does the database's SELECT FOR UPDATE OF syntax require a column rather # than a table? select_for_update_of_column = False # Does the default test database allow multiple connections? # Usually an indication that the test database is in-memory test_db_allows_multiple_connections = True # Can an object be saved without an explicit primary key? supports_unspecified_pk = False # Can a fixture contain forward references? i.e., are # FK constraints checked at the end of transaction, or # at the end of each save operation? supports_forward_references = True # Does the backend truncate names properly when they are too long? truncates_names = False # Is there a REAL datatype in addition to floats/doubles? has_real_datatype = False supports_subqueries_in_group_by = True # Is there a true datatype for uuid? has_native_uuid_field = False # Is there a true datatype for timedeltas? has_native_duration_field = False # Does the database driver supports same type temporal data subtraction # by returning the type used to store duration field? supports_temporal_subtraction = False # Does the __regex lookup support backreferencing and grouping? supports_regex_backreferencing = True # Can date/datetime lookups be performed using a string? supports_date_lookup_using_string = True # Can datetimes with timezones be used? supports_timezones = True # Does the database have a copy of the zoneinfo database? has_zoneinfo_database = True # When performing a GROUP BY, is an ORDER BY NULL required # to remove any ordering? requires_explicit_null_ordering_when_grouping = False # Does the backend order NULL values as largest or smallest? nulls_order_largest = False # The database's limit on the number of query parameters. max_query_params = None # Can an object have an autoincrement primary key of 0? MySQL says No. allows_auto_pk_0 = True # Do we need to NULL a ForeignKey out, or can the constraint check be # deferred can_defer_constraint_checks = False # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas supports_mixed_date_datetime_comparisons = True # Does the backend support tablespaces? Default to False because it isn't # in the SQL standard. 
supports_tablespaces = False # Does the backend reset sequences between tests? supports_sequence_reset = True # Can the backend introspect the default value of a column? can_introspect_default = True # Confirm support for introspected foreign keys # Every database can do this reliably, except MySQL, # which can't do it for MyISAM tables can_introspect_foreign_keys = True # Can the backend introspect an AutoField, instead of an IntegerField? can_introspect_autofield = False # Can the backend introspect a BigIntegerField, instead of an IntegerField? can_introspect_big_integer_field = True # Can the backend introspect an BinaryField, instead of an TextField? can_introspect_binary_field = True # Can the backend introspect an DecimalField, instead of an FloatField? can_introspect_decimal_field = True # Can the backend introspect a DurationField, instead of a BigIntegerField? can_introspect_duration_field = True # Can the backend introspect an IPAddressField, instead of an CharField? can_introspect_ip_address_field = False # Can the backend introspect a PositiveIntegerField, instead of an IntegerField? can_introspect_positive_integer_field = False # Can the backend introspect a SmallIntegerField, instead of an IntegerField? can_introspect_small_integer_field = False # Can the backend introspect a TimeField, instead of a DateTimeField? can_introspect_time_field = True # Some backends may not be able to differentiate BooleanField from other # fields such as IntegerField. introspected_boolean_field_type = 'BooleanField' # Can the backend introspect the column order (ASC/DESC) for indexes? supports_index_column_ordering = True # Support for the DISTINCT ON clause can_distinct_on_fields = False # Does the backend decide to commit before SAVEPOINT statements # when autocommit is disabled? https://bugs.python.org/issue8145#msg109965 autocommits_when_autocommit_is_off = False # Does the backend prevent running SQL queries in broken transactions? atomic_transactions = True # Can we roll back DDL in a transaction? can_rollback_ddl = False # Does it support operations requiring references rename in a transaction? supports_atomic_references_rename = True # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE? supports_combined_alters = False # Does it support foreign keys? supports_foreign_keys = True # Does it support CHECK constraints? supports_column_check_constraints = True supports_table_check_constraints = True # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value}) # parameter passing? Note this can be provided by the backend even if not # supported by the Python driver supports_paramstyle_pyformat = True # Does the backend require literal defaults, rather than parameterized ones? requires_literal_defaults = False # Does the backend require a connection reset after each material schema change? connection_persists_old_columns = False # What kind of error does the backend throw when accessing closed cursor? closed_cursor_error_class = ProgrammingError # Does 'a' LIKE 'A' match? has_case_insensitive_like = True # Does the backend require the sqlparse library for splitting multi-line # statements before executing them? requires_sqlparse_for_splitting = True # Suffix for backends that don't support "SELECT xxx;" queries. bare_select_suffix = '' # If NULL is implied on columns without needing to be explicitly specified implied_column_null = False uppercases_column_names = False # Does the backend support "select for update" queries with limit (and offset)? 
supports_select_for_update_with_limit = True # Does the backend ignore null expressions in GREATEST and LEAST queries unless # every expression is null? greatest_least_ignores_nulls = False # Can the backend clone databases for parallel test execution? # Defaults to False to allow third-party backends to opt-in. can_clone_databases = False # Does the backend consider table names with different casing to # be equal? ignores_table_name_case = False # Place FOR UPDATE right after FROM clause. Used on MSSQL. for_update_after_from = False # Combinatorial flags supports_select_union = True supports_select_intersection = True supports_select_difference = True supports_slicing_ordering_in_compound = False # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate # expressions? supports_aggregate_filter_clause = False # Does the backend support indexing a TextField? supports_index_on_text_field = True # Does the backed support window expressions (expression OVER (...))? supports_over_clause = False # Does the backend support CAST with precision? supports_cast_with_precision = True # How many second decimals does the database return when casting a value to # a type with time? time_cast_precision = 6 # SQL to create a procedure for use by the Django test suite. The # functionality of the procedure isn't important. create_test_procedure_without_params_sql = None create_test_procedure_with_int_param_sql = None # Does the backend support keyword parameters for cursor.callproc()? supports_callproc_kwargs = False # Convert CharField results from bytes to str in database functions. db_functions_convert_bytes_to_str = False # What formats does the backend EXPLAIN syntax support? supported_explain_formats = set() # Does DatabaseOperations.explain_query_prefix() raise ValueError if # unknown kwargs are passed to QuerySet.explain()? validates_explain_options = True # Does the backend support the default parameter in lead() and lag()? supports_default_in_lead_lag = True # Does the backend support ignoring constraint or uniqueness errors during # INSERT? supports_ignore_conflicts = True # Does this backend require casting the results of CASE expressions used # in UPDATE statements to ensure the expression has the correct type? requires_casted_case_in_updates = False def __init__(self, connection):
@cached_property def supports_explaining_query_execution(self): """Does this backend support explaining query execution?""" return self.connection.ops.explain_prefix is not None @cached_property def supports_transactions(self): """Confirm support for transactions.""" with self.connection.cursor() as cursor: cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)') self.connection.set_autocommit(False) cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)') self.connection.rollback() self.connection.set_autocommit(True) cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST') count, = cursor.fetchone() cursor.execute('DROP TABLE ROLLBACK_TEST') return count == 0 @cached_property def supports_stddev(self): """Confirm support for STDDEV and related stats functions.""" try: self.connection.ops.check_expression_support(StdDev(1)) except NotSupportedError: return False return True
self.connection = connection
volume.go
package goxtremio import xms "github.com/emccode/goxtremio/api/v3" type Volume *xms.Volume type NewVolumeOptions xms.PostVolumesReq type NewVolumeResult *xms.PostVolumesResp //GetVolume returns a specific volume by name or ID func (c *Client) GetVolume(id string, name string) (Volume, error) { volume, err := c.api.GetVolume(id, name) if err != nil
return volume.Content, nil } //GetVolumes returns a list of volumes func (c *Client) GetVolumes() (Refs, error) { volumes, err := c.api.GetVolumes() if err != nil { return nil, err } return volumes.Volumes, nil } //VolumeCtor constructs a new Volume instance func VolumeCtor() Volume { return &xms.Volume{} } //VolumeCtorNameIndex constructs a new Volume instance with the given name and index func VolumeCtorNameIndex(name string, index int) Volume { return &xms.Volume{Name: name, Index: index} } //NewVolume creates a volume func (c *Client) NewVolume(opts *NewVolumeOptions) (NewVolumeResult, error) { req := xms.PostVolumesReq(*opts) return c.api.PostVolumes(&req) } //DeleteVolume deletes a volume func (c *Client) DeleteVolume(id string, name string) error { return c.api.DeleteVolumes(id, name) }
{ return nil, err }