Dataset columns: file_name (string, 3-137 chars), prefix (string, 0-918k chars), suffix (string, 0-962k chars), middle (string, 0-812k chars).
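The prefix, middle, and suffix columns split each source file into three spans, presumably for fill-in-the-middle training: concatenating prefix + middle + suffix reproduces the original file, and middle is the span a model would be asked to complete. Below is a minimal Go sketch of that reassembly; the Row struct, its field names, and the exact split shown for the seller.js entry are illustrative assumptions, not part of the dataset schema.

package main

import "fmt"

// Row mirrors the four dataset columns listed above (field names are assumed, not prescribed).
type Row struct {
	FileName string
	Prefix   string
	Middle   string
	Suffix   string
}

func main() {
	// Illustrative row based on the seller.js entry further down; the precise
	// prefix/middle/suffix boundaries of that entry are an assumption.
	row := Row{
		FileName: "seller.js",
		Prefix:   "const User = require('./user'); class Seller extends User { }\n",
		Middle:   "module.exports = Seller;",
		Suffix:   "",
	}
	// Reconstruct the complete file content from its three spans.
	full := row.Prefix + row.Middle + row.Suffix
	fmt.Printf("%s reassembled to %d bytes\n", row.FileName, len(full))
}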
productsubscriptions.go
package apimanagement // Copyright (c) Microsoft and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // limitations under the License. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/validation" "net/http" ) // ProductSubscriptionsClient is the client for the ProductSubscriptions methods of the Apimanagement service. type ProductSubscriptionsClient struct { BaseClient } // NewProductSubscriptionsClient creates an instance of the ProductSubscriptionsClient client. func NewProductSubscriptionsClient() ProductSubscriptionsClient { return ProductSubscriptionsClient{New()} } // List lists the collection of subscriptions to the specified product. // // apimBaseURL is the management endpoint of the API Management service, for example // https://myapimservice.management.azure-api.net. productID is product identifier. Must be unique in the current // API Management service instance. filter is | Field | Supported operators | Supported functions // | // |--------------|------------------------|---------------------------------------------| // | id | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | // | name | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | // | stateComment | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | // | userId | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | // | productId | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | // | state | eq | | top is number of records // to return. skip is number of records to skip. 
func (client ProductSubscriptionsClient) List(ctx context.Context, apimBaseURL string, productID string, filter string, top *int32, skip *int32) (result SubscriptionCollectionPage, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: productID, Constraints: []validation.Constraint{{Target: "productID", Name: validation.MaxLength, Rule: 256, Chain: nil}, {Target: "productID", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "productID", Name: validation.Pattern, Rule: `^[^*#&+:<>?]+$`, Chain: nil}}}, {TargetValue: top, Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}}}}}, {TargetValue: skip, Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}}}}}}); err != nil { return result, validation.NewError("apimanagement.ProductSubscriptionsClient", "List", err.Error()) } result.fn = client.listNextResults req, err := client.ListPreparer(ctx, apimBaseURL, productID, filter, top, skip) if err != nil { err = autorest.NewErrorWithError(err, "apimanagement.ProductSubscriptionsClient", "List", nil, "Failure preparing request") return } resp, err := client.ListSender(req) if err != nil { result.sc.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "apimanagement.ProductSubscriptionsClient", "List", resp, "Failure sending request") return } result.sc, err = client.ListResponder(resp) if err != nil {
return } // ListPreparer prepares the List request. func (client ProductSubscriptionsClient) ListPreparer(ctx context.Context, apimBaseURL string, productID string, filter string, top *int32, skip *int32) (*http.Request, error) { urlParameters := map[string]interface{}{ "apimBaseUrl": apimBaseURL, } pathParameters := map[string]interface{}{ "productId": autorest.Encode("path", productID), } const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } if len(filter) > 0 { queryParameters["$filter"] = autorest.Encode("query", filter) } if top != nil { queryParameters["$top"] = autorest.Encode("query", *top) } if skip != nil { queryParameters["$skip"] = autorest.Encode("query", *skip) } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithCustomBaseURL("{apimBaseUrl}", urlParameters), autorest.WithPathParameters("/products/{productId}/subscriptions", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ProductSubscriptionsClient) ListSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. The method always // closes the http.Response Body. func (client ProductSubscriptionsClient) ListResponder(resp *http.Response) (result SubscriptionCollection, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // listNextResults retrieves the next set of results, if any. func (client ProductSubscriptionsClient) listNextResults(lastResults SubscriptionCollection) (result SubscriptionCollection, err error) { req, err := lastResults.subscriptionCollectionPreparer() if err != nil { return result, autorest.NewErrorWithError(err, "apimanagement.ProductSubscriptionsClient", "listNextResults", nil, "Failure preparing next results request") } if req == nil { return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} return result, autorest.NewErrorWithError(err, "apimanagement.ProductSubscriptionsClient", "listNextResults", resp, "Failure sending next results request") } result, err = client.ListResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "apimanagement.ProductSubscriptionsClient", "listNextResults", resp, "Failure responding to next results request") } return } // ListComplete enumerates all values, automatically crossing page boundaries as required. func (client ProductSubscriptionsClient) ListComplete(ctx context.Context, apimBaseURL string, productID string, filter string, top *int32, skip *int32) (result SubscriptionCollectionIterator, err error) { result.page, err = client.List(ctx, apimBaseURL, productID, filter, top, skip) return }
err = autorest.NewErrorWithError(err, "apimanagement.ProductSubscriptionsClient", "List", resp, "Failure responding to request") }
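A minimal usage sketch for the client above, not part of the generated file: it assumes the rest of the generated package exists (the BaseClient authorizer field and a SubscriptionCollectionIterator with the usual autorest NotDone/Value/NextWithContext methods) and assumes an import path of github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/ctrl/2017-03-01/apimanagement; the service URL and product identifier are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/ctrl/2017-03-01/apimanagement"
)

func main() {
	client := apimanagement.NewProductSubscriptionsClient()
	// client.Authorizer = ... // configure an authorizer for the management endpoint elsewhere

	ctx := context.Background()
	// ListComplete wraps List and follows nextLink pages automatically via the iterator.
	iter, err := client.ListComplete(ctx,
		"https://myapimservice.management.azure-api.net", // apimBaseURL
		"my-product", // productID: 1-256 chars, must not contain *#&+:<>?
		"",           // filter: empty string means no $filter is sent
		nil,          // top: nil lets the service choose the page size
		nil)          // skip: nil means start at the first record
	if err != nil {
		log.Fatal(err)
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}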
active_user.go
package util import ( "context" "sync" "time" "go.uber.org/atomic" "github.com/cortexproject/cortex/pkg/util/services" ) // ActiveUsers keeps track of latest user's activity timestamp, // and allows purging users that are no longer active. type ActiveUsers struct { mu sync.RWMutex timestamps map[string]*atomic.Int64 // As long as unit used by Update and Purge is the same, it doesn't matter what it is. } func NewActiveUsers() *ActiveUsers
func (m *ActiveUsers) UpdateUserTimestamp(userID string, ts int64) { m.mu.RLock() u := m.timestamps[userID] m.mu.RUnlock() if u != nil { u.Store(ts) return } // Pre-allocate new atomic to avoid doing allocation with lock held. newAtomic := atomic.NewInt64(ts) // We need RW lock to create new entry. m.mu.Lock() u = m.timestamps[userID] if u != nil { // Unlock first to reduce contention. m.mu.Unlock() u.Store(ts) return } m.timestamps[userID] = newAtomic m.mu.Unlock() } // PurgeInactiveUsers removes users that were last active before given deadline, and returns removed users. func (m *ActiveUsers) PurgeInactiveUsers(deadline int64) []string { // Find inactive users with read-lock. m.mu.RLock() inactive := make([]string, 0, len(m.timestamps)) for userID, ts := range m.timestamps { if ts.Load() <= deadline { inactive = append(inactive, userID) } } m.mu.RUnlock() if len(inactive) == 0 { return nil } // Cleanup inactive users. for ix := 0; ix < len(inactive); { userID := inactive[ix] deleted := false m.mu.Lock() u := m.timestamps[userID] if u != nil && u.Load() <= deadline { delete(m.timestamps, userID) deleted = true } m.mu.Unlock() if deleted { // keep it in the output ix++ } else { // not really inactive, remove it from output inactive = append(inactive[:ix], inactive[ix+1:]...) } } return inactive } // ActiveUsersCleanupService tracks active users, and periodically purges inactive ones while running. type ActiveUsersCleanupService struct { services.Service activeUsers *ActiveUsers cleanupFunc func(string) inactiveTimeout time.Duration } func NewActiveUsersCleanupWithDefaultValues(cleanupFn func(string)) *ActiveUsersCleanupService { return NewActiveUsersCleanupService(3*time.Minute, 15*time.Minute, cleanupFn) } func NewActiveUsersCleanupService(cleanupInterval, inactiveTimeout time.Duration, cleanupFn func(string)) *ActiveUsersCleanupService { s := &ActiveUsersCleanupService{ activeUsers: NewActiveUsers(), cleanupFunc: cleanupFn, inactiveTimeout: inactiveTimeout, } s.Service = services.NewTimerService(cleanupInterval, nil, s.iteration, nil).WithName("active users cleanup") return s } func (s *ActiveUsersCleanupService) UpdateUserTimestamp(user string, now time.Time) { s.activeUsers.UpdateUserTimestamp(user, now.UnixNano()) } func (s *ActiveUsersCleanupService) iteration(_ context.Context) error { inactiveUsers := s.activeUsers.PurgeInactiveUsers(time.Now().Add(-s.inactiveTimeout).UnixNano()) for _, userID := range inactiveUsers { s.cleanupFunc(userID) } return nil }
{ return &ActiveUsers{ timestamps: map[string]*atomic.Int64{}, } }
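A small usage sketch for ActiveUsers, assuming the file lives in the cortex pkg/util package as its package clause suggests; the tenant IDs and timeout are placeholders. It records per-user activity and then purges anyone idle past a deadline, using the same time unit (nanoseconds) for both calls as the struct comment requires.

package main

import (
	"fmt"
	"time"

	"github.com/cortexproject/cortex/pkg/util"
)

func main() {
	users := util.NewActiveUsers()

	// Record activity; only the unit has to be consistent between Update and Purge.
	now := time.Now()
	users.UpdateUserTimestamp("tenant-a", now.UnixNano())
	users.UpdateUserTimestamp("tenant-b", now.Add(-30*time.Minute).UnixNano())

	// Remove everyone whose last activity is older than 15 minutes.
	deadline := now.Add(-15 * time.Minute).UnixNano()
	for _, userID := range users.PurgeInactiveUsers(deadline) {
		fmt.Println("purged inactive user:", userID) // prints tenant-b only
	}
}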
seller.js
const User = require('./user'); class Seller extends User { }
module.exports = Seller;
series.rs
use crate::arrow_interop::to_rust::array_to_rust; use crate::dataframe::PyDataFrame; use crate::datatypes::PyDataType; use crate::error::PyPolarsEr; use crate::utils::str_to_polarstype; use crate::{arrow_interop, dispatch::ApplyLambda, npy::aligned_array, prelude::*}; use numpy::PyArray1; use polars::chunked_array::builder::get_bitmap; use pyo3::types::{PyList, PyTuple}; use pyo3::{exceptions::PyRuntimeError, prelude::*, Python}; use std::any::Any; use std::ops::{BitAnd, BitOr}; #[derive(Clone, Debug)] pub struct ObjectValue { inner: PyObject, } impl<'a> FromPyObject<'a> for ObjectValue { fn extract(ob: &'a PyAny) -> PyResult<Self> { let gil = Python::acquire_gil(); let python = gil.python(); Ok(ObjectValue { inner: ob.to_object(python), }) } } /// # Safety /// /// The caller is responsible for checking that val is Object otherwise UB impl From<&dyn Any> for &ObjectValue { fn from(val: &dyn Any) -> Self { unsafe { &*(val as *const dyn Any as *const ObjectValue) } } } impl ToPyObject for ObjectValue { fn to_object(&self, _py: Python) -> PyObject { self.inner.clone() } } impl Default for ObjectValue { fn default() -> Self { let gil = Python::acquire_gil(); let python = gil.python(); ObjectValue { inner: python.None(), } } } #[pyclass] #[repr(transparent)] #[derive(Clone)] pub struct PySeries { pub series: Series, } impl PySeries { pub(crate) fn new(series: Series) -> Self { PySeries { series } } } // Init with numpy arrays macro_rules! init_method { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { #[staticmethod] pub fn $name(name: &str, val: &PyArray1<$type>) -> PySeries { unsafe { PySeries { series: Series::new(name, val.as_slice().unwrap()), } } } } }; } init_method!(new_i8, i8); init_method!(new_i16, i16); init_method!(new_i32, i32); init_method!(new_i64, i64); init_method!(new_bool, bool); init_method!(new_u8, u8); init_method!(new_u16, u16); init_method!(new_u32, u32); init_method!(new_u64, u64); init_method!(new_date32, i32); init_method!(new_date64, i64); init_method!(new_duration_ns, i64); init_method!(new_time_ns, i64); #[pymethods] impl PySeries { #[staticmethod] pub fn new_f32(name: &str, val: &PyArray1<f32>, nan_is_null: bool) -> PySeries { // numpy array as slice is unsafe unsafe { if nan_is_null { let mut ca: Float32Chunked = val .as_slice() .expect("contiguous array") .iter() .map(|&val| if f32::is_nan(val) { None } else { Some(val) }) .collect(); ca.rename(name); ca.into_series().into() } else { Series::new(name, val.as_slice().unwrap()).into() } } } #[staticmethod] pub fn new_f64(name: &str, val: &PyArray1<f64>, nan_is_null: bool) -> PySeries { // numpy array as slice is unsafe unsafe { if nan_is_null { let mut ca: Float64Chunked = val .as_slice() .expect("contiguous array") .iter() .map(|&val| if f64::is_nan(val) { None } else { Some(val) }) .collect(); ca.rename(name); ca.into_series().into() } else { Series::new(name, val.as_slice().unwrap()).into() } } } } // Init with lists that can contain Nones macro_rules! 
init_method_opt { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { #[staticmethod] pub fn $name(name: &str, val: Wrap<ChunkedArray<$type>>) -> PySeries { let mut s = val.0.into_series(); s.rename(name); PySeries { series: s } } } }; } init_method_opt!(new_opt_u8, UInt8Type); init_method_opt!(new_opt_u16, UInt16Type); init_method_opt!(new_opt_u32, UInt32Type); init_method_opt!(new_opt_u64, UInt64Type); init_method_opt!(new_opt_i8, Int8Type); init_method_opt!(new_opt_i16, Int16Type); init_method_opt!(new_opt_i32, Int32Type); init_method_opt!(new_opt_i64, Int64Type); init_method_opt!(new_opt_f32, Float32Type); init_method_opt!(new_opt_f64, Float64Type); init_method_opt!(new_opt_bool, BooleanType); init_method_opt!(new_opt_date32, Int32Type); init_method_opt!(new_opt_date64, Int64Type); init_method_opt!(new_opt_duration_ns, Int64Type); init_method_opt!(new_opt_time_ns, Int64Type); impl From<Series> for PySeries { fn from(s: Series) -> Self { PySeries::new(s) } } macro_rules! parse_temporal_from_str_slice { ($name:ident, $ca_type:ident) => { #[pymethods] impl PySeries { #[staticmethod] pub fn $name(name: &str, val: Vec<&str>, fmt: &str) -> Self { let parsed = $ca_type::parse_from_str_slice(name, &val, fmt); PySeries::new(parsed.into_series()) } } }; } // TODO: add other temporals parse_temporal_from_str_slice!(parse_date32_from_str_slice, Date32Chunked); #[pymethods] #[allow( clippy::wrong_self_convention, clippy::should_implement_trait, clippy::len_without_is_empty )] impl PySeries { #[staticmethod] pub fn new_str(name: &str, val: Wrap<Utf8Chunked>) -> Self { let mut s = val.0.into_series(); s.rename(name); PySeries::new(s) } #[staticmethod] pub fn new_object(name: &str, val: Vec<ObjectValue>) -> Self { let s = ObjectChunked::<ObjectValue>::new_from_vec(name, val).into_series(); s.into() } #[staticmethod] pub fn repeat(name: &str, val: &str, n: usize) -> Self { let mut ca: Utf8Chunked = (0..n).map(|_| val).collect(); ca.rename(name); ca.into_series().into() } #[staticmethod] pub fn from_arrow(name: &str, array: &PyAny) -> PyResult<Self> { let arr = array_to_rust(array)?; let series: Series = std::convert::TryFrom::try_from((name, arr)).map_err(PyPolarsEr::from)?; Ok(series.into()) } pub fn get_object(&self, index: usize) -> PyObject { let gil = Python::acquire_gil(); let python = gil.python(); if matches!(self.series.dtype(), DataType::Object) { // we don't use the null bitmap in this context as T::default is pyobject None let any = self.series.get_as_any(index); let obj: &ObjectValue = any.into(); obj.to_object(python) } else { python.None() } } pub fn get_fmt(&self, index: usize) -> String { format!("{}", self.series.get(index)) } pub fn rechunk(&mut self, in_place: bool) -> Option<Self> { let series = self.series.rechunk(); if in_place { self.series = series; None } else { Some(PySeries::new(series)) } } pub fn bitand(&self, other: &PySeries) -> Self { let s = self .series .bool() .expect("boolean") .bitand(other.series.bool().expect("boolean")) .into_series(); s.into() } pub fn bitor(&self, other: &PySeries) -> Self { let s = self .series .bool() .expect("boolean") .bitor(other.series.bool().expect("boolean")) .into_series(); s.into() } pub fn cum_sum(&self, reverse: bool) -> Self { self.series.cum_sum(reverse).into() } pub fn cum_max(&self, reverse: bool) -> Self { self.series.cum_max(reverse).into() } pub fn cum_min(&self, reverse: bool) -> Self { self.series.cum_min(reverse).into() } pub fn chunk_lengths(&self) -> Vec<usize> { self.series.chunk_lengths().clone() } pub fn 
name(&self) -> &str { self.series.name() } pub fn rename(&mut self, name: &str) { self.series.rename(name); } pub fn dtype(&self) -> u8 { let dt: PyDataType = self.series.dtype().into(); dt as u8 } pub fn n_chunks(&self) -> usize { self.series.n_chunks() } pub fn limit(&self, num_elements: usize) -> PyResult<Self> { let series = self.series.limit(num_elements).map_err(PyPolarsEr::from)?; Ok(PySeries { series }) } pub fn slice(&self, offset: usize, length: usize) -> PyResult<Self> { let series = self .series .slice(offset, length) .map_err(PyPolarsEr::from)?; Ok(PySeries { series }) } pub fn append(&mut self, other: &PySeries) -> PyResult<()> { self.series .append(&other.series) .map_err(PyPolarsEr::from)?; Ok(()) } pub fn filter(&self, filter: &PySeries) -> PyResult<Self> { let filter_series = &filter.series; if let Ok(ca) = filter_series.bool() { let series = self.series.filter(ca).map_err(PyPolarsEr::from)?; Ok(PySeries { series }) } else { Err(PyRuntimeError::new_err("Expected a boolean mask")) } } pub fn add(&self, other: &PySeries) -> PyResult<Self> { Ok(PySeries::new(&self.series + &other.series)) } pub fn sub(&self, other: &PySeries) -> PyResult<Self> { Ok(PySeries::new(&self.series - &other.series)) } pub fn mul(&self, other: &PySeries) -> PyResult<Self> { Ok(PySeries::new(&self.series * &other.series)) } pub fn div(&self, other: &PySeries) -> PyResult<Self> { Ok(PySeries::new(&self.series / &other.series)) } pub fn head(&self, length: Option<usize>) -> PyResult<Self> { Ok(PySeries::new(self.series.head(length))) } pub fn tail(&self, length: Option<usize>) -> PyResult<Self> { Ok(PySeries::new(self.series.tail(length))) } pub fn sort_in_place(&mut self, reverse: bool) { self.series.sort_in_place(reverse); } pub fn sort(&mut self, reverse: bool) -> Self { PySeries::new(self.series.sort(reverse)) } pub fn argsort(&self, reverse: bool) -> Py<PyArray1<u32>> { let gil = pyo3::Python::acquire_gil(); let pyarray = PyArray1::from_iter( gil.python(), self.series.argsort(reverse).into_iter().flatten(), ); pyarray.to_owned() } pub fn unique(&self) -> PyResult<Self> { let unique = self.series.unique().map_err(PyPolarsEr::from)?; Ok(unique.into()) } pub fn value_counts(&self) -> PyResult<PyDataFrame> { let df = self.series.value_counts().map_err(PyPolarsEr::from)?; Ok(df.into()) } pub fn arg_unique(&self) -> PyResult<Py<PyArray1<u32>>> { let gil = pyo3::Python::acquire_gil(); let arg_unique = self.series.arg_unique().map_err(PyPolarsEr::from)?; let pyarray = PyArray1::from_vec(gil.python(), arg_unique); Ok(pyarray.to_owned()) } pub fn take(&self, indices: Vec<usize>) -> Self { let take = self.series.take_iter(&mut indices.iter().copied()); PySeries::new(take) } pub fn take_with_series(&self, indices: &PySeries) -> PyResult<Self> { let idx = indices.series.u32().map_err(PyPolarsEr::from)?; let take = self.series.take(&idx); Ok(PySeries::new(take)) } pub fn null_count(&self) -> PyResult<usize> { Ok(self.series.null_count()) } pub fn is_null(&self) -> PySeries { Self::new(self.series.is_null().into_series()) } pub fn is_not_null(&self) -> PySeries { Self::new(self.series.is_not_null().into_series()) } pub fn is_not_nan(&self) -> PyResult<Self> { let ca = self.series.is_not_nan().map_err(PyPolarsEr::from)?; Ok(ca.into_series().into()) } pub fn is_nan(&self) -> PyResult<Self> { let ca = self.series.is_nan().map_err(PyPolarsEr::from)?; Ok(ca.into_series().into()) } pub fn is_finite(&self) -> PyResult<Self> { let ca = self.series.is_finite().map_err(PyPolarsEr::from)?; Ok(ca.into_series().into()) } pub 
fn is_infinite(&self) -> PyResult<Self> { let ca = self.series.is_infinite().map_err(PyPolarsEr::from)?; Ok(ca.into_series().into()) } pub fn is_unique(&self) -> PyResult<Self> { let ca = self.series.is_unique().map_err(PyPolarsEr::from)?; Ok(ca.into_series().into()) } pub fn arg_true(&self) -> PyResult<Self> { let ca = self.series.arg_true().map_err(PyPolarsEr::from)?; Ok(ca.into_series().into()) } pub fn sample_n(&self, n: usize, with_replacement: bool) -> PyResult<Self> { let s = self .series .sample_n(n, with_replacement) .map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn sample_frac(&self, frac: f64, with_replacement: bool) -> PyResult<Self> { let s = self .series .sample_frac(frac, with_replacement) .map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn is_duplicated(&self) -> PyResult<Self> { let ca = self.series.is_duplicated().map_err(PyPolarsEr::from)?; Ok(ca.into_series().into()) } pub fn
(&self) -> PyResult<Self> { let s = self.series.explode().map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn take_every(&self, n: usize) -> Self { let s = self.series.take_every(n); s.into() } pub fn series_equal(&self, other: &PySeries, null_equal: bool) -> bool { if null_equal { self.series.series_equal_missing(&other.series) } else { self.series.series_equal(&other.series) } } pub fn eq(&self, rhs: &PySeries) -> PyResult<Self> { Ok(Self::new(self.series.eq(&rhs.series).into_series())) } pub fn neq(&self, rhs: &PySeries) -> PyResult<Self> { Ok(Self::new(self.series.neq(&rhs.series).into_series())) } pub fn gt(&self, rhs: &PySeries) -> PyResult<Self> { Ok(Self::new(self.series.gt(&rhs.series).into_series())) } pub fn gt_eq(&self, rhs: &PySeries) -> PyResult<Self> { Ok(Self::new(self.series.gt_eq(&rhs.series).into_series())) } pub fn lt(&self, rhs: &PySeries) -> PyResult<Self> { Ok(Self::new(self.series.lt(&rhs.series).into_series())) } pub fn lt_eq(&self, rhs: &PySeries) -> PyResult<Self> { Ok(Self::new(self.series.lt_eq(&rhs.series).into_series())) } pub fn _not(&self) -> PyResult<Self> { let bool = self.series.bool().map_err(PyPolarsEr::from)?; Ok((!bool).into_series().into()) } pub fn as_str(&self) -> PyResult<String> { Ok(format!("{:?}", self.series)) } pub fn len(&self) -> usize { self.series.len() } pub fn to_list(&self) -> PyObject { let gil = Python::acquire_gil(); let python = gil.python(); let series = &self.series; let pylist = match series.dtype() { DataType::Boolean => PyList::new(python, series.bool().unwrap()), DataType::Utf8 => PyList::new(python, series.utf8().unwrap()), DataType::UInt8 => PyList::new(python, series.u8().unwrap()), DataType::UInt16 => PyList::new(python, series.u16().unwrap()), DataType::UInt32 => PyList::new(python, series.u32().unwrap()), DataType::UInt64 => PyList::new(python, series.u64().unwrap()), DataType::Int8 => PyList::new(python, series.i8().unwrap()), DataType::Int16 => PyList::new(python, series.i16().unwrap()), DataType::Int32 => PyList::new(python, series.i32().unwrap()), DataType::Int64 => PyList::new(python, series.i64().unwrap()), DataType::Float32 => PyList::new(python, series.f32().unwrap()), DataType::Float64 => PyList::new(python, series.f64().unwrap()), DataType::Date32 => PyList::new(python, series.date32().unwrap()), DataType::Date64 => PyList::new(python, series.date64().unwrap()), DataType::Time64(TimeUnit::Nanosecond) => { PyList::new(python, series.time64_nanosecond().unwrap()) } DataType::Duration(TimeUnit::Nanosecond) => { PyList::new(python, series.duration_nanosecond().unwrap()) } DataType::Duration(TimeUnit::Millisecond) => { PyList::new(python, series.duration_millisecond().unwrap()) } DataType::Object => { let v = PyList::empty(python); for i in 0..series.len() { let val = series .get_as_any(i) .downcast_ref::<ObjectValue>() .map(|obj| obj.inner.clone()) .unwrap_or_else(|| python.None()); v.append(val).unwrap(); } v } dt => panic!("to_list() not implemented for {:?}", dt), }; pylist.to_object(python) } /// Rechunk and return a pointer to the start of the Series. 
/// Only implemented for numeric types pub fn as_single_ptr(&mut self) -> PyResult<usize> { let ptr = self.series.as_single_ptr().map_err(PyPolarsEr::from)?; Ok(ptr) } pub fn drop_nulls(&self) -> Self { self.series.drop_nulls().into() } pub fn fill_none(&self, strategy: &str) -> PyResult<Self> { let strat = match strategy { "backward" => FillNoneStrategy::Backward, "forward" => FillNoneStrategy::Forward, "min" => FillNoneStrategy::Min, "max" => FillNoneStrategy::Max, "mean" => FillNoneStrategy::Mean, s => return Err(PyPolarsEr::Other(format!("Strategy {} not supported", s)).into()), }; let series = self.series.fill_none(strat).map_err(PyPolarsEr::from)?; Ok(PySeries::new(series)) } pub fn to_arrow(&mut self) -> PyResult<PyObject> { self.rechunk(true); let gil = Python::acquire_gil(); let py = gil.python(); let pyarrow = py.import("pyarrow")?; arrow_interop::to_py::to_py_array(&self.series.chunks()[0], py, pyarrow) } pub fn clone(&self) -> Self { PySeries::new(self.series.clone()) } pub fn apply_lambda(&self, lambda: &PyAny, output_type: &PyAny) -> PyResult<PySeries> { let gil = Python::acquire_gil(); let py = gil.python(); let series = &self.series; let output_type = match output_type.is_none() { true => None, false => { let str_repr = output_type.str().unwrap().to_str().unwrap(); Some(str_to_polarstype(str_repr)) } }; let out = match output_type { Some(DataType::Int8) => { let ca: Int8Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::Int16) => { let ca: Int16Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::Int32) => { let ca: Int32Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::Int64) => { let ca: Int64Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::UInt8) => { let ca: UInt8Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::UInt16) => { let ca: UInt16Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::UInt32) => { let ca: UInt32Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::UInt64) => { let ca: UInt64Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::Float32) => { let ca: Float32Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::Float64) => { let ca: Float64Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::Boolean) => { let ca: BooleanChunked = apply_method_all_arrow_series!( series, apply_lambda_with_bool_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::Date32) => { let ca: Date32Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::Date64) => { let ca: Date64Chunked = apply_method_all_arrow_series!( series, 
apply_lambda_with_primitive_out_type, py, lambda, 0, None )?; ca.into_series() } Some(DataType::Utf8) => { let ca: Utf8Chunked = apply_method_all_arrow_series!( series, apply_lambda_with_utf8_out_type, py, lambda, 0, None )?; ca.into_series() } None => { return apply_method_all_arrow_series!(series, apply_lambda_unknown, py, lambda) } _ => return apply_method_all_arrow_series!(series, apply_lambda, py, lambda), }; Ok(PySeries::new(out)) } pub fn shift(&self, periods: i64) -> Self { let s = self.series.shift(periods); PySeries::new(s) } pub fn zip_with(&self, mask: &PySeries, other: &PySeries) -> PyResult<Self> { let mask = mask.series.bool().map_err(PyPolarsEr::from)?; let s = self .series .zip_with(mask, &other.series) .map_err(PyPolarsEr::from)?; Ok(PySeries::new(s)) } pub fn str_lengths(&self) -> PyResult<Self> { let ca = self.series.utf8().map_err(PyPolarsEr::from)?; let s = ca.str_lengths().into_series(); Ok(PySeries::new(s)) } pub fn str_contains(&self, pat: &str) -> PyResult<Self> { let ca = self.series.utf8().map_err(PyPolarsEr::from)?; let s = ca.contains(pat).map_err(PyPolarsEr::from)?.into_series(); Ok(s.into()) } pub fn str_replace(&self, pat: &str, val: &str) -> PyResult<Self> { let ca = self.series.utf8().map_err(PyPolarsEr::from)?; let s = ca .replace(pat, val) .map_err(PyPolarsEr::from)? .into_series(); Ok(s.into()) } pub fn str_replace_all(&self, pat: &str, val: &str) -> PyResult<Self> { let ca = self.series.utf8().map_err(PyPolarsEr::from)?; let s = ca .replace_all(pat, val) .map_err(PyPolarsEr::from)? .into_series(); Ok(s.into()) } pub fn str_to_uppercase(&self) -> PyResult<Self> { let ca = self.series.utf8().map_err(PyPolarsEr::from)?; let s = ca.to_uppercase().into_series(); Ok(s.into()) } pub fn str_to_lowercase(&self) -> PyResult<Self> { let ca = self.series.utf8().map_err(PyPolarsEr::from)?; let s = ca.to_lowercase().into_series(); Ok(s.into()) } pub fn str_parse_date32(&self, fmt: Option<&str>) -> PyResult<Self> { if let Ok(ca) = &self.series.utf8() { let ca = ca.as_date32(fmt).map_err(PyPolarsEr::from)?; Ok(PySeries::new(ca.into_series())) } else { Err(PyPolarsEr::Other("cannot parse date32 expected utf8 type".into()).into()) } } pub fn str_parse_date64(&self, fmt: Option<&str>) -> PyResult<Self> { if let Ok(ca) = &self.series.utf8() { let ca = ca.as_date64(fmt).map_err(PyPolarsEr::from)?; Ok(ca.into_series().into()) } else { Err(PyPolarsEr::Other("cannot parse date64 expected utf8 type".into()).into()) } } pub fn datetime_str_fmt(&self, fmt: &str) -> PyResult<Self> { let s = self .series .datetime_str_fmt(fmt) .map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn as_duration(&self) -> PyResult<Self> { match self.series.dtype() { DataType::Date64 => { let ca = self.series.date64().unwrap().as_duration(); Ok(ca.into_series().into()) } DataType::Date32 => { let ca = self.series.date32().unwrap().as_duration(); Ok(ca.into_series().into()) } _ => Err(PyPolarsEr::Other( "Only date32 and date64 can be transformed as duration".into(), ) .into()), } } pub fn to_dummies(&self) -> PyResult<PyDataFrame> { let df = self.series.to_dummies().map_err(PyPolarsEr::from)?; Ok(df.into()) } pub fn get_list(&self, index: usize) -> Option<Self> { if let Ok(ca) = &self.series.list() { let s = ca.get(index); s.map(|s| s.into()) } else { None } } pub fn rolling_sum( &self, window_size: usize, weight: Option<Vec<f64>>, ignore_null: bool, ) -> PyResult<Self> { let s = self .series .rolling_sum(window_size, weight.as_deref(), ignore_null) .map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn 
rolling_mean( &self, window_size: usize, weight: Option<Vec<f64>>, ignore_null: bool, ) -> PyResult<Self> { let s = self .series .rolling_mean(window_size, weight.as_deref(), ignore_null) .map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn rolling_max( &self, window_size: usize, weight: Option<Vec<f64>>, ignore_null: bool, ) -> PyResult<Self> { let s = self .series .rolling_max(window_size, weight.as_deref(), ignore_null) .map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn rolling_min( &self, window_size: usize, weight: Option<Vec<f64>>, ignore_null: bool, ) -> PyResult<Self> { let s = self .series .rolling_min(window_size, weight.as_deref(), ignore_null) .map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn year(&self) -> PyResult<Self> { let s = self.series.year().map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn month(&self) -> PyResult<Self> { let s = self.series.month().map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn day(&self) -> PyResult<Self> { let s = self.series.day().map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn ordinal_day(&self) -> PyResult<Self> { let s = self.series.ordinal_day().map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn hour(&self) -> PyResult<Self> { let s = self.series.hour().map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn minute(&self) -> PyResult<Self> { let s = self.series.minute().map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn second(&self) -> PyResult<Self> { let s = self.series.second().map_err(PyPolarsEr::from)?; Ok(s.into()) } pub fn nanosecond(&self) -> PyResult<Self> { let s = self.series.nanosecond().map_err(PyPolarsEr::from)?; Ok(s.into()) } } macro_rules! impl_ufuncs { ($name:ident, $type:ty, $unsafe_from_ptr_method:ident) => { #[pymethods] impl PySeries { // applies a ufunc by accepting a lambda out: ufunc(*args, out=out) // the out array is allocated in this method, send to Python and once the ufunc is applied // ownership is taken by Rust again to prevent memory leak. // if the ufunc fails, we first must take ownership back. pub fn $name(&self, lambda: &PyAny) -> PyResult<PySeries> { // numpy array object, and a *mut ptr let gil = Python::acquire_gil(); let py = gil.python(); let size = self.len(); let (out_array, ptr) = unsafe { aligned_array::<$type>(py, size) }; debug_assert_eq!(out_array.get_refcnt(), 1); // inserting it in a tuple increase the reference count by 1. 
let args = PyTuple::new(py, &[out_array]); debug_assert_eq!(out_array.get_refcnt(), 2); // whatever the result, we must take the leaked memory ownership back let s = match lambda.call1(args) { Ok(_) => { // if this assert fails, the lambda has taken a reference to the object, so we must panic // args and the lambda return have a reference, making a total of 3 assert_eq!(out_array.get_refcnt(), 3); self.$unsafe_from_ptr_method(ptr as usize, size) } Err(e) => { // first take ownership from the leaked memory // so the destructor gets called when we go out of scope self.$unsafe_from_ptr_method(ptr as usize, size); // return error information return Err(e); } }; Ok(s) } } }; } impl_ufuncs!(apply_ufunc_f32, f32, unsafe_from_ptr_f32); impl_ufuncs!(apply_ufunc_f64, f64, unsafe_from_ptr_f64); impl_ufuncs!(apply_ufunc_u8, u8, unsafe_from_ptr_u8); impl_ufuncs!(apply_ufunc_u16, u16, unsafe_from_ptr_u16); impl_ufuncs!(apply_ufunc_u32, u32, unsafe_from_ptr_u32); impl_ufuncs!(apply_ufunc_u64, u64, unsafe_from_ptr_u64); impl_ufuncs!(apply_ufunc_i8, i8, unsafe_from_ptr_i8); impl_ufuncs!(apply_ufunc_i16, i16, unsafe_from_ptr_i16); impl_ufuncs!(apply_ufunc_i32, i32, unsafe_from_ptr_i32); impl_ufuncs!(apply_ufunc_i64, i64, unsafe_from_ptr_i64); macro_rules! impl_set_with_mask { ($name:ident, $native:ty, $cast:ident, $variant:ident) => { fn $name(series: &Series, filter: &PySeries, value: Option<$native>) -> Result<Series> { let mask = filter.series.bool()?; let ca = series.$cast()?; let new = ca.set(mask, value)?; Ok(new.into_series()) } #[pymethods] impl PySeries { pub fn $name(&self, filter: &PySeries, value: Option<$native>) -> PyResult<Self> { let series = $name(&self.series, filter, value).map_err(PyPolarsEr::from)?; Ok(Self::new(series)) } } }; } impl_set_with_mask!(set_with_mask_str, &str, utf8, Utf8); impl_set_with_mask!(set_with_mask_f64, f64, f64, Float64); impl_set_with_mask!(set_with_mask_f32, f32, f32, Float32); impl_set_with_mask!(set_with_mask_u8, u8, u8, UInt8); impl_set_with_mask!(set_with_mask_u16, u16, u16, UInt16); impl_set_with_mask!(set_with_mask_u32, u32, u32, UInt32); impl_set_with_mask!(set_with_mask_u64, u64, u64, UInt64); impl_set_with_mask!(set_with_mask_i8, i8, i8, Int8); impl_set_with_mask!(set_with_mask_i16, i16, i16, Int16); impl_set_with_mask!(set_with_mask_i32, i32, i32, Int32); impl_set_with_mask!(set_with_mask_i64, i64, i64, Int64); impl_set_with_mask!(set_with_mask_bool, bool, bool, Boolean); macro_rules! 
impl_set_at_idx { ($name:ident, $native:ty, $cast:ident, $variant:ident) => { fn $name(series: &Series, idx: &[usize], value: Option<$native>) -> Result<Series> { let ca = series.$cast()?; let new = ca.set_at_idx(idx.iter().copied(), value)?; Ok(new.into_series()) } #[pymethods] impl PySeries { pub fn $name(&self, idx: &PyArray1<usize>, value: Option<$native>) -> PyResult<Self> { let idx = unsafe { idx.as_slice().unwrap() }; let series = $name(&self.series, &idx, value).map_err(PyPolarsEr::from)?; Ok(Self::new(series)) } } }; } impl_set_at_idx!(set_at_idx_str, &str, utf8, Utf8); impl_set_at_idx!(set_at_idx_f64, f64, f64, Float64); impl_set_at_idx!(set_at_idx_f32, f32, f32, Float32); impl_set_at_idx!(set_at_idx_u8, u8, u8, UInt8); impl_set_at_idx!(set_at_idx_u16, u16, u16, UInt16); impl_set_at_idx!(set_at_idx_u32, u32, u32, UInt32); impl_set_at_idx!(set_at_idx_u64, u64, u64, UInt64); impl_set_at_idx!(set_at_idx_i8, i8, i8, Int8); impl_set_at_idx!(set_at_idx_i16, i16, i16, Int16); impl_set_at_idx!(set_at_idx_i32, i32, i32, Int32); impl_set_at_idx!(set_at_idx_i64, i64, i64, Int64); macro_rules! impl_get { ($name:ident, $series_variant:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self, index: usize) -> Option<$type> { if let Ok(ca) = self.series.$series_variant() { ca.get(index) } else { None } } } }; } impl_get!(get_f32, f32, f32); impl_get!(get_f64, f64, f64); impl_get!(get_u8, u8, u8); impl_get!(get_u16, u16, u16); impl_get!(get_u32, u32, u32); impl_get!(get_u64, u64, u64); impl_get!(get_i8, i8, i8); impl_get!(get_i16, i16, i16); impl_get!(get_i32, i32, i32); impl_get!(get_i64, i64, i64); impl_get!(get_str, utf8, &str); impl_get!(get_date32, date32, i32); impl_get!(get_date64, date64, i64); // Not public methods. macro_rules! impl_unsafe_from_ptr { ($name:ident, $ca_type:ident) => { impl PySeries { fn $name(&self, ptr: usize, len: usize) -> Self { let av = unsafe { AlignedVec::from_ptr(ptr, len, len) }; let (_null_count, null_bitmap) = get_bitmap(self.series.chunks()[0].as_ref()); let ca = ChunkedArray::<$ca_type>::new_from_owned_with_null_bitmap( self.name(), av, null_bitmap, ); Self::new(ca.into_series()) } } }; } impl_unsafe_from_ptr!(unsafe_from_ptr_f32, Float32Type); impl_unsafe_from_ptr!(unsafe_from_ptr_f64, Float64Type); impl_unsafe_from_ptr!(unsafe_from_ptr_u8, UInt8Type); impl_unsafe_from_ptr!(unsafe_from_ptr_u16, UInt16Type); impl_unsafe_from_ptr!(unsafe_from_ptr_u32, UInt32Type); impl_unsafe_from_ptr!(unsafe_from_ptr_u64, UInt64Type); impl_unsafe_from_ptr!(unsafe_from_ptr_i8, Int8Type); impl_unsafe_from_ptr!(unsafe_from_ptr_i16, Int16Type); impl_unsafe_from_ptr!(unsafe_from_ptr_i32, Int32Type); impl_unsafe_from_ptr!(unsafe_from_ptr_i64, Int64Type); macro_rules! impl_cast { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self) -> PyResult<PySeries> { let s = self.series.cast::<$type>().map_err(PyPolarsEr::from)?; Ok(PySeries::new(s)) } } }; } impl_cast!(cast_u8, UInt8Type); impl_cast!(cast_u16, UInt16Type); impl_cast!(cast_u32, UInt32Type); impl_cast!(cast_u64, UInt64Type); impl_cast!(cast_i8, Int8Type); impl_cast!(cast_i16, Int16Type); impl_cast!(cast_i32, Int32Type); impl_cast!(cast_i64, Int64Type); impl_cast!(cast_f32, Float32Type); impl_cast!(cast_f64, Float64Type); impl_cast!(cast_date32, Date32Type); impl_cast!(cast_date64, Date64Type); impl_cast!(cast_time64ns, Time64NanosecondType); impl_cast!(cast_duration_ns, DurationNanosecondType); impl_cast!(cast_str, Utf8Type); impl_cast!(cast_categorical, CategoricalType); macro_rules! 
impl_arithmetic { ($name:ident, $type:ty, $operand:tt) => { #[pymethods] impl PySeries { pub fn $name(&self, other: $type) -> PyResult<PySeries> { Ok(PySeries::new(&self.series $operand other)) } } }; } impl_arithmetic!(add_u8, u8, +); impl_arithmetic!(add_u16, u16, +); impl_arithmetic!(add_u32, u32, +); impl_arithmetic!(add_u64, u64, +); impl_arithmetic!(add_i8, i8, +); impl_arithmetic!(add_i16, i16, +); impl_arithmetic!(add_i32, i32, +); impl_arithmetic!(add_i64, i64, +); impl_arithmetic!(add_f32, f32, +); impl_arithmetic!(add_f64, f64, +); impl_arithmetic!(sub_u8, u8, -); impl_arithmetic!(sub_u16, u16, -); impl_arithmetic!(sub_u32, u32, -); impl_arithmetic!(sub_u64, u64, -); impl_arithmetic!(sub_i8, i8, -); impl_arithmetic!(sub_i16, i16, -); impl_arithmetic!(sub_i32, i32, -); impl_arithmetic!(sub_i64, i64, -); impl_arithmetic!(sub_f32, f32, -); impl_arithmetic!(sub_f64, f64, -); impl_arithmetic!(div_u8, u8, /); impl_arithmetic!(div_u16, u16, /); impl_arithmetic!(div_u32, u32, /); impl_arithmetic!(div_u64, u64, /); impl_arithmetic!(div_i8, i8, /); impl_arithmetic!(div_i16, i16, /); impl_arithmetic!(div_i32, i32, /); impl_arithmetic!(div_i64, i64, /); impl_arithmetic!(div_f32, f32, /); impl_arithmetic!(div_f64, f64, /); impl_arithmetic!(mul_u8, u8, *); impl_arithmetic!(mul_u16, u16, *); impl_arithmetic!(mul_u32, u32, *); impl_arithmetic!(mul_u64, u64, *); impl_arithmetic!(mul_i8, i8, *); impl_arithmetic!(mul_i16, i16, *); impl_arithmetic!(mul_i32, i32, *); impl_arithmetic!(mul_i64, i64, *); impl_arithmetic!(mul_f32, f32, *); impl_arithmetic!(mul_f64, f64, *); macro_rules! impl_rhs_arithmetic { ($name:ident, $type:ty, $operand:ident) => { #[pymethods] impl PySeries { pub fn $name(&self, other: $type) -> PyResult<PySeries> { Ok(PySeries::new(other.$operand(&self.series))) } } }; } impl_rhs_arithmetic!(add_u8_rhs, u8, add); impl_rhs_arithmetic!(add_u16_rhs, u16, add); impl_rhs_arithmetic!(add_u32_rhs, u32, add); impl_rhs_arithmetic!(add_u64_rhs, u64, add); impl_rhs_arithmetic!(add_i8_rhs, i8, add); impl_rhs_arithmetic!(add_i16_rhs, i16, add); impl_rhs_arithmetic!(add_i32_rhs, i32, add); impl_rhs_arithmetic!(add_i64_rhs, i64, add); impl_rhs_arithmetic!(add_f32_rhs, f32, add); impl_rhs_arithmetic!(add_f64_rhs, f64, add); impl_rhs_arithmetic!(sub_u8_rhs, u8, sub); impl_rhs_arithmetic!(sub_u16_rhs, u16, sub); impl_rhs_arithmetic!(sub_u32_rhs, u32, sub); impl_rhs_arithmetic!(sub_u64_rhs, u64, sub); impl_rhs_arithmetic!(sub_i8_rhs, i8, sub); impl_rhs_arithmetic!(sub_i16_rhs, i16, sub); impl_rhs_arithmetic!(sub_i32_rhs, i32, sub); impl_rhs_arithmetic!(sub_i64_rhs, i64, sub); impl_rhs_arithmetic!(sub_f32_rhs, f32, sub); impl_rhs_arithmetic!(sub_f64_rhs, f64, sub); impl_rhs_arithmetic!(div_u8_rhs, u8, div); impl_rhs_arithmetic!(div_u16_rhs, u16, div); impl_rhs_arithmetic!(div_u32_rhs, u32, div); impl_rhs_arithmetic!(div_u64_rhs, u64, div); impl_rhs_arithmetic!(div_i8_rhs, i8, div); impl_rhs_arithmetic!(div_i16_rhs, i16, div); impl_rhs_arithmetic!(div_i32_rhs, i32, div); impl_rhs_arithmetic!(div_i64_rhs, i64, div); impl_rhs_arithmetic!(div_f32_rhs, f32, div); impl_rhs_arithmetic!(div_f64_rhs, f64, div); impl_rhs_arithmetic!(mul_u8_rhs, u8, mul); impl_rhs_arithmetic!(mul_u16_rhs, u16, mul); impl_rhs_arithmetic!(mul_u32_rhs, u32, mul); impl_rhs_arithmetic!(mul_u64_rhs, u64, mul); impl_rhs_arithmetic!(mul_i8_rhs, i8, mul); impl_rhs_arithmetic!(mul_i16_rhs, i16, mul); impl_rhs_arithmetic!(mul_i32_rhs, i32, mul); impl_rhs_arithmetic!(mul_i64_rhs, i64, mul); impl_rhs_arithmetic!(mul_f32_rhs, f32, mul); 
impl_rhs_arithmetic!(mul_f64_rhs, f64, mul); macro_rules! impl_sum { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self) -> PyResult<Option<$type>> { Ok(self.series.sum()) } } }; } impl_sum!(sum_u8, u8); impl_sum!(sum_u16, u16); impl_sum!(sum_u32, u32); impl_sum!(sum_u64, u64); impl_sum!(sum_i8, i8); impl_sum!(sum_i16, i16); impl_sum!(sum_i32, i32); impl_sum!(sum_i64, i64); impl_sum!(sum_f32, f32); impl_sum!(sum_f64, f64); macro_rules! impl_min { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self) -> PyResult<Option<$type>> { Ok(self.series.min()) } } }; } impl_min!(min_u8, u8); impl_min!(min_u16, u16); impl_min!(min_u32, u32); impl_min!(min_u64, u64); impl_min!(min_i8, i8); impl_min!(min_i16, i16); impl_min!(min_i32, i32); impl_min!(min_i64, i64); impl_min!(min_f32, f32); impl_min!(min_f64, f64); macro_rules! impl_max { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self) -> PyResult<Option<$type>> { Ok(self.series.max()) } } }; } impl_max!(max_u8, u8); impl_max!(max_u16, u16); impl_max!(max_u32, u32); impl_max!(max_u64, u64); impl_max!(max_i8, i8); impl_max!(max_i16, i16); impl_max!(max_i32, i32); impl_max!(max_i64, i64); impl_max!(max_f32, f32); impl_max!(max_f64, f64); macro_rules! impl_mean { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self) -> PyResult<Option<$type>> { Ok(self.series.mean()) } } }; } impl_mean!(mean_u8, u8); impl_mean!(mean_u16, u16); impl_mean!(mean_u32, u32); impl_mean!(mean_u64, u64); impl_mean!(mean_i8, i8); impl_mean!(mean_i16, i16); impl_mean!(mean_i32, i32); impl_mean!(mean_i64, i64); impl_mean!(mean_f32, f32); impl_mean!(mean_f64, f64); macro_rules! impl_eq_num { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self, rhs: $type) -> PyResult<PySeries> { Ok(PySeries::new(self.series.eq(rhs).into_series())) } } }; } impl_eq_num!(eq_u8, u8); impl_eq_num!(eq_u16, u16); impl_eq_num!(eq_u32, u32); impl_eq_num!(eq_u64, u64); impl_eq_num!(eq_i8, i8); impl_eq_num!(eq_i16, i16); impl_eq_num!(eq_i32, i32); impl_eq_num!(eq_i64, i64); impl_eq_num!(eq_f32, f32); impl_eq_num!(eq_f64, f64); impl_eq_num!(eq_str, &str); macro_rules! impl_neq_num { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self, rhs: $type) -> PyResult<PySeries> { Ok(PySeries::new(self.series.neq(rhs).into_series())) } } }; } impl_neq_num!(neq_u8, u8); impl_neq_num!(neq_u16, u16); impl_neq_num!(neq_u32, u32); impl_neq_num!(neq_u64, u64); impl_neq_num!(neq_i8, i8); impl_neq_num!(neq_i16, i16); impl_neq_num!(neq_i32, i32); impl_neq_num!(neq_i64, i64); impl_neq_num!(neq_f32, f32); impl_neq_num!(neq_f64, f64); impl_neq_num!(neq_str, &str); macro_rules! impl_gt_num { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self, rhs: $type) -> PyResult<PySeries> { Ok(PySeries::new(self.series.gt(rhs).into_series())) } } }; } impl_gt_num!(gt_u8, u8); impl_gt_num!(gt_u16, u16); impl_gt_num!(gt_u32, u32); impl_gt_num!(gt_u64, u64); impl_gt_num!(gt_i8, i8); impl_gt_num!(gt_i16, i16); impl_gt_num!(gt_i32, i32); impl_gt_num!(gt_i64, i64); impl_gt_num!(gt_f32, f32); impl_gt_num!(gt_f64, f64); impl_gt_num!(gt_str, &str); macro_rules! 
impl_gt_eq_num { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self, rhs: $type) -> PyResult<PySeries> { Ok(PySeries::new(self.series.gt_eq(rhs).into_series())) } } }; } impl_gt_eq_num!(gt_eq_u8, u8); impl_gt_eq_num!(gt_eq_u16, u16); impl_gt_eq_num!(gt_eq_u32, u32); impl_gt_eq_num!(gt_eq_u64, u64); impl_gt_eq_num!(gt_eq_i8, i8); impl_gt_eq_num!(gt_eq_i16, i16); impl_gt_eq_num!(gt_eq_i32, i32); impl_gt_eq_num!(gt_eq_i64, i64); impl_gt_eq_num!(gt_eq_f32, f32); impl_gt_eq_num!(gt_eq_f64, f64); impl_gt_eq_num!(gt_eq_str, &str); macro_rules! impl_lt_num { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self, rhs: $type) -> PyResult<PySeries> { Ok(PySeries::new(self.series.lt(rhs).into_series())) } } }; } impl_lt_num!(lt_u8, u8); impl_lt_num!(lt_u16, u16); impl_lt_num!(lt_u32, u32); impl_lt_num!(lt_u64, u64); impl_lt_num!(lt_i8, i8); impl_lt_num!(lt_i16, i16); impl_lt_num!(lt_i32, i32); impl_lt_num!(lt_i64, i64); impl_lt_num!(lt_f32, f32); impl_lt_num!(lt_f64, f64); impl_lt_num!(lt_str, &str); macro_rules! impl_lt_eq_num { ($name:ident, $type:ty) => { #[pymethods] impl PySeries { pub fn $name(&self, rhs: $type) -> PyResult<PySeries> { Ok(PySeries::new(self.series.lt_eq(rhs).into_series())) } } }; } impl_lt_eq_num!(lt_eq_u8, u8); impl_lt_eq_num!(lt_eq_u16, u16); impl_lt_eq_num!(lt_eq_u32, u32); impl_lt_eq_num!(lt_eq_u64, u64); impl_lt_eq_num!(lt_eq_i8, i8); impl_lt_eq_num!(lt_eq_i16, i16); impl_lt_eq_num!(lt_eq_i32, i32); impl_lt_eq_num!(lt_eq_i64, i64); impl_lt_eq_num!(lt_eq_f32, f32); impl_lt_eq_num!(lt_eq_f64, f64); impl_lt_eq_num!(lt_eq_str, &str); pub(crate) fn to_series_collection(ps: Vec<PySeries>) -> Vec<Series> { // prevent destruction of ps let mut ps = std::mem::ManuallyDrop::new(ps); // get mutable pointer and reinterpret as Series let p = ps.as_mut_ptr() as *mut Series; let len = ps.len(); let cap = ps.capacity(); // The pointer ownership will be transferred to Vec and this will be responsible for dealoc unsafe { Vec::from_raw_parts(p, len, cap) } } pub(crate) fn to_pyseries_collection(s: Vec<Series>) -> Vec<PySeries> { let mut s = std::mem::ManuallyDrop::new(s); let p = s.as_mut_ptr() as *mut PySeries; let len = s.len(); let cap = s.capacity(); unsafe { Vec::from_raw_parts(p, len, cap) } } #[cfg(test)] mod test { use super::*; #[test] fn transmute_to_series() { // NOTE: This is only possible because PySeries is #[repr(transparent)] // https://doc.rust-lang.org/reference/type-layout.html let ps = PySeries { series: [1i32, 2, 3].iter().collect(), }; let s = unsafe { std::mem::transmute::<PySeries, Series>(ps.clone()) }; assert_eq!(s.sum::<i32>(), Some(6)); let collection = vec![ps]; let s = to_series_collection(collection); assert_eq!( s.iter().map(|s| s.sum::<i32>()).collect::<Vec<_>>(), vec![Some(6)] ); } }
explode
oci_network_load_balancer.py
#!/usr/bin/python # Copyright (c) 2020, 2021 Oracle and/or its affiliates. # This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Apache License v2.0 # See LICENSE.TXT for details. # GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], "supported_by": "community", } DOCUMENTATION = """ --- module: oci_network_load_balancer short_description: Manage a NetworkLoadBalancer resource in Oracle Cloud Infrastructure description: - This module allows the user to create, update and delete a NetworkLoadBalancer resource in Oracle Cloud Infrastructure - For I(state=present), creates a network load balancer. - "This resource has the following action operations in the M(oracle.oci.oci_network_load_balancer_actions) module: change_compartment." version_added: "2.9.0" author: Oracle (@oracle) options: compartment_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment containing the network load balancer. - Required for create using I(state=present). - Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set. - Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set. type: str display_name: description: - Network load balancer identifier, which can be renamed. - Required for create using I(state=present). - Required for update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set. - This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set. type: str aliases: ["name"] is_preserve_source_destination: description: - This parameter can be enabled only if backends are compute OCIDs. When enabled, the skipSourceDestinationCheck parameter is automatically enabled on the load balancer VNIC, and packets are sent to the backend with the entire IP header intact. - This parameter is updatable. type: bool reserved_ips: description: - An array of reserved Ips. type: list elements: dict suboptions: id: description: - OCID of the reserved public IP address created with the virtual cloud network. - Reserved public IP addresses are IP addresses that are registered using the virtual cloud network API. - Create a reserved public IP address. When you create the network load balancer, enter the OCID of the reserved public IP address in the reservedIp field to attach the IP address to the network load balancer. This task configures the network load balancer to listen to traffic on this IP address. - Reserved public IP addresses are not deleted when the network load balancer is deleted. The IP addresses become unattached from the network load balancer. - "Example: \\"ocid1.publicip.oc1.phx.unique_ID\\"" type: str is_private: description: - Whether the network load balancer has a virtual cloud network-local (private) IP address. - "If \\"true\\", then the service assigns a private IP address to the network load balancer." - "If \\"false\\", then the service assigns a public IP address to the network load balancer." - A public network load balancer is accessible from the internet, depending on the L(security list rules,https://docs.cloud.oracle.com/Content/network/Concepts/securitylists.htm) for your virtual cloud network. 
For more information about public and private network load balancers, see L(How Network Load Balancing Works,https://docs.cloud.oracle.com/Content/Balance/Concepts/balanceoverview.htm#how-network-load-balancing- works). This value is true by default. - "Example: `true`" type: bool subnet_id: description: - The subnet in which the network load balancer is spawned L(OCIDs,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm). - Required for create using I(state=present). type: str freeform_tags: description: - "Simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only. Example: `{\\"bar-key\\": \\"value\\"}`" - This parameter is updatable. type: dict defined_tags: description: - "Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`" - This parameter is updatable. type: dict network_load_balancer_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network load balancer to update. - Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set. - Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set. type: str aliases: ["id"] state: description: - The state of the NetworkLoadBalancer. - Use I(state=present) to create or update a NetworkLoadBalancer. - Use I(state=absent) to delete a NetworkLoadBalancer. type: str required: false default: 'present' choices: ["present", "absent"] extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ] """ EXAMPLES = """ - name: Create network_load_balancer oci_network_load_balancer: # required compartment_id: "ocid1.compartment.oc1..unique_ID" display_name: example_network_load_balancer subnet_id: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx" # optional is_preserve_source_destination: true reserved_ips: - # optional id: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx" is_private: true freeform_tags: {'Department': 'Finance'} defined_tags: {'Operations': {'CostCenter': 'US'}} - name: Update network_load_balancer oci_network_load_balancer: # required network_load_balancer_id: "ocid1.networkloadbalancer.oc1..xxxxxxEXAMPLExxxxxx" # optional display_name: example_network_load_balancer is_preserve_source_destination: true freeform_tags: {'Department': 'Finance'} defined_tags: {'Operations': {'CostCenter': 'US'}} - name: Update network_load_balancer using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set) oci_network_load_balancer: # required compartment_id: "ocid1.compartment.oc1..unique_ID" display_name: example_network_load_balancer # optional is_preserve_source_destination: true freeform_tags: {'Department': 'Finance'} defined_tags: {'Operations': {'CostCenter': 'US'}} - name: Delete network_load_balancer oci_network_load_balancer: # required network_load_balancer_id: "ocid1.networkloadbalancer.oc1..xxxxxxEXAMPLExxxxxx" state: absent - name: Delete network_load_balancer using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set) oci_network_load_balancer: # required compartment_id: "ocid1.compartment.oc1..unique_ID" display_name: example_network_load_balancer state: absent """ RETURN = """ network_load_balancer: description: - Details of the NetworkLoadBalancer resource acted upon by the current operation returned: on success type: complex contains: id: 
description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network load balancer. returned: on success type: str sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx" compartment_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment containing the network load balancer. returned: on success type: str sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx" display_name: description: - A user-friendly name, which does not have to be unique, and can be changed. - "Example: `example_load_balancer`" returned: on success type: str sample: example_load_balancer lifecycle_state: description: - The current state of the network load balancer. returned: on success type: str sample: CREATING lifecycle_details: description: - A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state. returned: on success type: str sample: lifecycle_details_example time_created: description: - The date and time the network load balancer was created, in the format defined by RFC3339. - "Example: `2020-05-01T21:10:29.600Z`" returned: on success type: str sample: "2020-05-01T21:10:29.600Z" time_updated: description: - The time the network load balancer was updated. An RFC3339 formatted date-time string. - "Example: `2020-05-01T22:10:29.600Z`" returned: on success type: str sample: "2020-05-01T22:10:29.600Z" ip_addresses: description: - An array of IP addresses. returned: on success type: complex contains: ip_address: description: - An IP address. - "Example: `192.168.0.3`" returned: on success type: str sample: 192.168.0.3 is_public: description: - Whether the IP address is public or private. - "If \\"true\\", then the IP address is public and accessible from the internet." - "If \\"false\\", then the IP address is private and accessible only from within the associated virtual cloud network." returned: on success type: bool sample: true reserved_ip: description: - "" returned: on success type: complex contains: id: description: - OCID of the reserved public IP address created with the virtual cloud network. - Reserved public IP addresses are IP addresses that are registered using the virtual cloud network API. - Create a reserved public IP address. When you create the network load balancer, enter the OCID of the reserved public IP address in the reservedIp field to attach the IP address to the network load balancer. This task configures the network load balancer to listen to traffic on this IP address. - Reserved public IP addresses are not deleted when the network load balancer is deleted. The IP addresses become unattached from the network load balancer. - "Example: \\"ocid1.publicip.oc1.phx.unique_ID\\"" returned: on success type: str sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx" is_private: description: - Whether the network load balancer has a virtual cloud network-local (private) IP address. - "If \\"true\\", then the service assigns a private IP address to the network load balancer." - "If \\"false\\", then the service assigns a public IP address to the network load balancer." - A public network load balancer is accessible from the internet, depending the L(security list rules,https://docs.cloud.oracle.com/Content/network/Concepts/securitylists.htm) for your virtual cloudn network. 
For more information about public and private network load balancers, see L(How Network Load Balancing Works,https://docs.cloud.oracle.com/Content/Balance/Concepts/balanceoverview.htm#how-network-load-balancing- works). This value is true by default. - "Example: `true`" returned: on success type: bool sample: true is_preserve_source_destination: description: - When enabled, the skipSourceDestinationCheck parameter is automatically enabled on the load balancer VNIC. Packets are sent to the backend set without any changes to the source and destination IP. returned: on success type: bool sample: true subnet_id: description: - "The subnet in which the network load balancer is spawned L(OCIDs,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).\\"" returned: on success type: str sample: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx" network_security_group_ids: description: - An array of network security groups L(OCIDs,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) associated with the network load balancer. - During the creation of the network load balancer, the service adds the new load balancer to the specified network security groups. - "The benefits of associating the network load balancer with network security groups include:" - "* Network security groups define network security rules to govern ingress and egress traffic for the network load balancer." - "* The network security rules of other resources can reference the network security groups associated with the network load balancer to ensure access." - "Example: [\\"ocid1.nsg.oc1.phx.unique_ID\\"]" returned: on success type: list sample: [] listeners: description: - Listeners associated with the network load balancer. returned: on success type: complex contains: name: description: - A friendly name for the listener. It must be unique and it cannot be changed. - "Example: `example_listener`" returned: on success type: str sample: example_listener default_backend_set_name: description: - The name of the associated backend set. - "Example: `example_backend_set`" returned: on success type: str sample: example_backend_set port: description: - The communication port for the listener. - "Example: `80`" returned: on success type: int sample: 0 protocol: description: - The protocol on which the listener accepts connection requests. For public network load balancers, ANY protocol refers to TCP/UDP. For private network load balancers, ANY protocol refers to TCP/UDP/ICMP (note that ICMP requires isPreserveSourceDestination to be set to true). To get a list of valid protocols, use the L(ListNetworkLoadBalancersProtocols,https://docs.cloud.oracle.com/en- us/iaas/api/#/en/NetworkLoadBalancer/20200501/networkLoadBalancerProtocol/ListNetworkLoadBalancersProtocols) operation. - "Example: `TCP`" returned: on success type: str sample: TCP backend_sets: description: - Backend sets associated with the network load balancer. returned: on success type: complex contains: name: description: - A user-friendly name for the backend set that must be unique and cannot be changed. - Valid backend set names include only alphanumeric characters, dashes, and underscores. Backend set names cannot contain spaces. Avoid entering confidential information. - "Example: `example_backend_set`" returned: on success type: str sample: example_backend_set policy: description: - The network load balancer policy for the backend set. 
- "Example: `FIVE_TUPLE`" returned: on success type: str sample: FIVE_TUPLE is_preserve_source: description: - If this parameter is enabled, then the network load balancer preserves the source IP of the packet when it is forwarded to backends. Backends see the original source IP. If the isPreserveSourceDestination parameter is enabled for the network load balancer resource, then this parameter cannot be disabled. The value is true by default. returned: on success type: bool sample: true backends: description: - Array of backends. returned: on success type: complex contains: name: description: - A read-only field showing the IP address/IP OCID and port that uniquely identify this backend server in the backend set. - "Example: `10.0.0.3:8080`, or `ocid1.privateip..oc1.<var>&lt;unique_ID&gt;</var>:443` or `10.0.0.3:0`" returned: on success type: str sample: 10.0.0.3:8080 ip_address: description: - "The IP address of the backend server. Example: `10.0.0.3`" returned: on success type: str sample: 10.0.0.3 target_id: description: - "The IP OCID/Instance OCID associated with the backend server. Example: `ocid1.privateip..oc1.<var>&lt;unique_ID&gt;</var>`" returned: on success type: str sample: "ocid1.privateip..oc1.unique_ID" port: description: - The communication port for the backend server. - "Example: `8080`" returned: on success type: int sample: 8080 weight: description: - The network load balancing policy weight assigned to the server. Backend servers with a higher weight receive a larger proportion of incoming traffic. For example, a server weighted '3' receives three times the number of new connections as a server weighted '1'. For more information about load balancing policies, see L(How Network Load Balancing Policies Work,https://docs.cloud.oracle.com/Content/Balance/Reference/lbpolicies.htm). - "Example: `3`" returned: on success type: int sample: 3 is_drain: description: - "Whether the network load balancer should drain this server. Servers marked \\"isDrain\\" receive no incoming traffic." - "Example: `false`" returned: on success type: bool sample: false is_backup: description: - "Whether the network load balancer should treat this server as a backup unit. If `true`, then the network load balancer forwards no ingress traffic to this backend server unless all other backend servers not marked as \\"isBackup\\" fail the health check policy." - "Example: `false`" returned: on success type: bool sample: false is_offline: description: - Whether the network load balancer should treat this server as offline. Offline servers receive no incoming traffic. - "Example: `false`" returned: on success type: bool sample: false health_checker: description: - "" returned: on success type: complex contains: protocol: description: - The protocol the health check must use; either HTTP or HTTPS, or UDP or TCP. - "Example: `HTTP`" returned: on success type: str sample: HTTP port: description: - The backend server port against which to run the health check. If the port is not specified, then the network load balancer uses the port information from the `Backend` object. The port must be specified if the backend port is 0. - "Example: `8080`" returned: on success type: int sample: 8080 retries: description: - "The number of retries to attempt before a backend server is considered \\"unhealthy\\". This number also applies when recovering a server to the \\"healthy\\" state. The default value is 3." 
- "Example: `3`" returned: on success type: int sample: 3 timeout_in_millis: description: - The maximum time, in milliseconds, to wait for a reply to a health check. A health check is successful only if a reply returns within this timeout period. The default value is 3000 (3 seconds). - "Example: `3000`" returned: on success type: int sample: 3000 interval_in_millis: description: - The interval between health checks, in milliseconds. The default value is 10000 (10 seconds). - "Example: `10000`" returned: on success type: int sample: 10000 url_path: description: - The path against which to run the health check. - "Example: `/healthcheck`" returned: on success type: str sample: /healthcheck response_body_regex: description: - A regular expression for parsing the response body from the backend server. - "Example: `^((?!false).|\\\\s)*$`" returned: on success type: str sample: response_body_regex_example return_code: description: - "The status code a healthy backend server should return. If you configure the health check policy to use the HTTP protocol, then you can use common HTTP status codes such as \\"200\\"." - "Example: `200`" returned: on success type: int sample: 0 request_data: description: - Base64 encoded pattern to be sent as UDP or TCP health check probe. returned: on success type: str sample: "example_request_data" response_data: description: - Base64 encoded pattern to be validated as UDP or TCP health check probe response. returned: on success type: str sample: "example_response_data" freeform_tags: description: - Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Department\\": \\"Finance\\"}`" returned: on success type: dict sample: {'Department': 'Finance'} defined_tags: description: - Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`" returned: on success type: dict sample: {'Operations': {'CostCenter': 'US'}} system_tags: description: - "Key-value pair representing system tags' keys and values scoped to a namespace. 
Example: `{\\"bar-key\\": \\"value\\"}`" returned: on success type: dict sample: {} sample: { "id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx", "compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx", "display_name": "example_load_balancer", "lifecycle_state": "CREATING", "lifecycle_details": "lifecycle_details_example", "time_created": "2020-05-01T21:10:29.600Z", "time_updated": "2020-05-01T22:10:29.600Z", "ip_addresses": [{ "ip_address": "192.168.0.3", "is_public": true, "reserved_ip": { "id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx" } }], "is_private": true, "is_preserve_source_destination": true, "subnet_id": "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx", "network_security_group_ids": [], "listeners": { "name": "example_listener", "default_backend_set_name": "example_backend_set", "port": 0, "protocol": "TCP" }, "backend_sets": { "name": "example_backend_set", "policy": "FIVE_TUPLE", "is_preserve_source": true, "backends": [{ "name": "10.0.0.3:8080", "ip_address": "10.0.0.3", "target_id": "ocid1.privateip..oc1.unique_ID", "port": 8080, "weight": 3, "is_drain": false, "is_backup": false, "is_offline": false }], "health_checker": { "protocol": "HTTP", "port": 8080, "retries": 3, "timeout_in_millis": 3000, "interval_in_millis": 10000, "url_path": "/healthcheck", "response_body_regex": "response_body_regex_example", "return_code": 0, "request_data": UNKNOWN TYPE - str, "response_data": UNKNOWN TYPE - str } }, "freeform_tags": {'Department': 'Finance'}, "defined_tags": {'Operations': {'CostCenter': 'US'}}, "system_tags": {} } """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.oracle.oci.plugins.module_utils import ( oci_common_utils, oci_wait_utils, ) from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import ( OCIResourceHelperBase, get_custom_class, ) try: from oci.network_load_balancer import NetworkLoadBalancerClient from oci.network_load_balancer.models import CreateNetworkLoadBalancerDetails from oci.network_load_balancer.models import UpdateNetworkLoadBalancerDetails HAS_OCI_PY_SDK = True except ImportError: HAS_OCI_PY_SDK = False class NetworkLoadBalancerHelperGen(OCIResourceHelperBase): """Supported operations: create, update, get, list and delete""" def get_module_resource_id_param(self): return "network_load_balancer_id" def get_module_resource_id(self): return self.module.params.get("network_load_balancer_id") def get_get_fn(self): return self.client.get_network_load_balancer def get_resource(self): return oci_common_utils.call_with_backoff( self.client.get_network_load_balancer, network_load_balancer_id=self.module.params.get("network_load_balancer_id"), ) def get_required_kwargs_for_list(self): required_list_method_params = [ "compartment_id", ] return dict( (param, self.module.params[param]) for param in required_list_method_params ) def get_optional_kwargs_for_list(self): optional_list_method_params = ["display_name"] return dict( (param, self.module.params[param]) for param in optional_list_method_params if self.module.params.get(param) is not None and ( self._use_name_as_identifier() or ( not self.module.params.get("key_by") or param in self.module.params.get("key_by") ) ) ) def list_resources(self): required_kwargs = self.get_required_kwargs_for_list() optional_kwargs = self.get_optional_kwargs_for_list() kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs) return oci_common_utils.list_all_resources( self.client.list_network_load_balancers, **kwargs ) def
(self): return CreateNetworkLoadBalancerDetails def get_exclude_attributes(self): return ["reserved_ips"] def create_resource(self): create_details = self.get_create_model() return oci_wait_utils.call_and_wait( call_fn=self.client.create_network_load_balancer, call_fn_args=(), call_fn_kwargs=dict(create_network_load_balancer_details=create_details,), waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY, operation=oci_common_utils.CREATE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=oci_common_utils.get_work_request_completed_states(), ) def get_update_model_class(self): return UpdateNetworkLoadBalancerDetails def update_resource(self): update_details = self.get_update_model() return oci_wait_utils.call_and_wait( call_fn=self.client.update_network_load_balancer, call_fn_args=(), call_fn_kwargs=dict( network_load_balancer_id=self.module.params.get( "network_load_balancer_id" ), update_network_load_balancer_details=update_details, ), waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY, operation=oci_common_utils.UPDATE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=oci_common_utils.get_work_request_completed_states(), ) def delete_resource(self): return oci_wait_utils.call_and_wait( call_fn=self.client.delete_network_load_balancer, call_fn_args=(), call_fn_kwargs=dict( network_load_balancer_id=self.module.params.get( "network_load_balancer_id" ), ), waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY, operation=oci_common_utils.DELETE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=oci_common_utils.get_work_request_completed_states(), ) NetworkLoadBalancerHelperCustom = get_custom_class("NetworkLoadBalancerHelperCustom") class ResourceHelper(NetworkLoadBalancerHelperCustom, NetworkLoadBalancerHelperGen): pass def main(): module_args = oci_common_utils.get_common_arg_spec( supports_create=True, supports_wait=True ) module_args.update( dict( compartment_id=dict(type="str"), display_name=dict(aliases=["name"], type="str"), is_preserve_source_destination=dict(type="bool"), reserved_ips=dict( type="list", elements="dict", options=dict(id=dict(type="str")) ), is_private=dict(type="bool"), subnet_id=dict(type="str"), freeform_tags=dict(type="dict"), defined_tags=dict(type="dict"), network_load_balancer_id=dict(aliases=["id"], type="str"), state=dict(type="str", default="present", choices=["present", "absent"]), ) ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if not HAS_OCI_PY_SDK: module.fail_json(msg="oci python sdk required for this module.") resource_helper = ResourceHelper( module=module, resource_type="network_load_balancer", service_client_class=NetworkLoadBalancerClient, namespace="network_load_balancer", ) result = dict(changed=False) if resource_helper.is_delete_using_name(): result = resource_helper.delete_using_name() elif resource_helper.is_delete(): result = resource_helper.delete() elif resource_helper.is_update_using_name(): result = resource_helper.update_using_name() elif resource_helper.is_update(): result = resource_helper.update() elif resource_helper.is_create(): result = resource_helper.create() module.exit_json(**result) if __name__ == "__main__": main()
get_create_model_class
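The RETURN documentation above notes that backend weights shape traffic distribution: a server weighted '3' receives roughly three times the number of new connections as a server weighted '1'. Below is a minimal Python sketch of weight-proportional selection; the backend names, weights, and the selection routine itself are illustrative assumptions, not the service's actual FIVE_TUPLE policy implementation.

# Illustrative only: weight-proportional backend selection.
# The backends and the picking strategy are hypothetical; the real
# distribution is done by the OCI network load balancer service.
import random

backends = [
    {"name": "10.0.0.3:8080", "weight": 3},  # should receive ~3x the new connections
    {"name": "10.0.0.4:8080", "weight": 1},
]

def pick_backend(pool):
    """Pick a backend with probability proportional to its weight."""
    total = sum(b["weight"] for b in pool)
    choice = random.uniform(0, total)
    upper = 0.0
    for b in pool:
        upper += b["weight"]
        if choice <= upper:
            return b
    return pool[-1]

# Rough check: over many draws the 3-weighted backend gets ~75% of connections.
counts = {b["name"]: 0 for b in backends}
for _ in range(10000):
    counts[pick_backend(backends)["name"]] += 1
print(counts)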
base64.go
package librarywrapper import ( "fmt" "github.com/k14s/ytt/pkg/template/core" "github.com/k14s/ytt/pkg/yttlibrary" "github.com/phil9909/ytt-lint/pkg/magic" "go.starlark.net/starlark" "go.starlark.net/starlarkstruct" ) var ( Base64APIWrapper = starlark.StringDict{ "base64": &starlarkstruct.Module{ Name: "base64", Members: starlark.StringDict{ "encode": starlark.NewBuiltin("base64.encode", core.ErrWrapper(base64Module{}.Encode)), "decode": starlark.NewBuiltin("base64.decode", core.ErrWrapper(base64Module{}.Decode)), }, }, } base64module = yttlibrary.Base64API["base64"].(*starlarkstruct.Module) ) func
() { yttlibrary.Base64API = Base64APIWrapper } type base64Module struct{} func (b base64Module) Encode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if args.Len() != 1 { return starlark.None, fmt.Errorf("expected exactly one argument") } _, ok := args.Index(0).(*magic.MagicType) if ok { return &magic.MagicType{ CouldBeString: true, CouldBeInt: false, CouldBeFloat: false, }, nil } encode, err := base64module.Attr("encode") if err != nil { return starlark.None, err } return encode.(*starlark.Builtin).CallInternal(thread, args, kwargs) } func (b base64Module) Decode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if args.Len() != 1 { return starlark.None, fmt.Errorf("expected exactly one argument") } _, ok := args.Index(0).(*magic.MagicType) if ok { return &magic.MagicType{ CouldBeString: true, CouldBeInt: false, CouldBeFloat: false, }, nil } decode, err := base64module.Attr("decode") if err != nil { return starlark.None, err } return decode.(*starlark.Builtin).CallInternal(thread, args, kwargs) }
init
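The wrapper above replaces ytt's real base64 module with one that short-circuits on magic (lint-time placeholder) arguments, returning a placeholder that "could be a string", and otherwise delegates to the original builtin. A rough Python analogue of that delegate-or-placeholder pattern follows; the names Magic and lint_b64encode are purely illustrative and not part of any real API.

# Conceptual analogue (Python) of the wrapper: if the input is a lint-time
# placeholder whose concrete value is unknown, return another placeholder
# describing the possible result type; otherwise delegate to the real
# implementation.
import base64

class Magic:
    """Stand-in for a value that is only known at template-render time."""
    def __init__(self, could_be_string=True):
        self.could_be_string = could_be_string

def lint_b64encode(value):
    if isinstance(value, Magic):
        # The result cannot be computed, but it is known to be a string.
        return Magic(could_be_string=True)
    return base64.b64encode(value.encode()).decode()

print(lint_b64encode("hello"))   # aGVsbG8=
print(lint_b64encode(Magic()))   # a placeholder object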
server.rs
/* * Copyright (c) Meta Platforms, Inc. and its affiliates. * * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ use bytes::BytesMut; use futures::future; use std::io; use std::net::SocketAddr; use std::path::Path; use tokio::io::AsyncRead; use tokio::io::AsyncReadExt; use tokio::net::TcpListener; use tokio::net::TcpStream; use tokio::net::UnixListener; use tokio::net::UnixStream; use tokio::sync::mpsc; use tokio::sync::oneshot; use super::error::Error; use super::inferior::StoppedInferior; use super::packet::Packet; use super::session::Session; /// GdbServer controller pub struct GdbServer { /// Signal gdbserver to start. pub server_tx: Option<oneshot::Sender<()>>, /// Signal gdbserver the very first tracee is ready. pub inferior_attached_tx: Option<mpsc::Sender<StoppedInferior>>, /// FIXME: the tracees are serialized already, tell gdbserver not to /// serialize by its own. pub sequentialized_guest: bool, } impl GdbServer { /// Creates a GDB server and binds to the given address. /// /// NOTE: The canonical GDB server port is `1234`. pub async fn from_addr(addr: SocketAddr) -> Result<Self, Error> { let (inferior_attached_tx, inferior_attached_rx) = mpsc::channel(1); let (server_tx, server_rx) = oneshot::channel(); let server = GdbServerImpl::from_addr(addr, server_rx, inferior_attached_rx).await?; tokio::task::spawn(async move { if let Err(err) = server.run().await { tracing::error!("Failed to run gdbserver: {:?}", err); } }); Ok(Self { server_tx: Some(server_tx), inferior_attached_tx: Some(inferior_attached_tx), sequentialized_guest: false, }) } /// Creates a GDB server from the given unix domain socket. This is useful /// when we know there will only be one client and want to avoid binding to a /// port. pub async fn from_path(path: &Path) -> Result<Self, Error> { let (inferior_attached_tx, inferior_attached_rx) = mpsc::channel(1); let (server_tx, server_rx) = oneshot::channel(); let server = GdbServerImpl::from_path(path, server_rx, inferior_attached_rx).await?; tokio::task::spawn(async move { if let Err(err) = server.run().await { tracing::error!("Failed to run gdbserver: {:?}", err); } }); Ok(Self { server_tx: Some(server_tx), inferior_attached_tx: Some(inferior_attached_tx), sequentialized_guest: false, }) } pub fn sequentialized_guest(&mut self) -> &mut Self { self.sequentialized_guest = true; self } #[allow(unused)] pub async fn notify_start(&mut self) -> Result<(), Error> { if let Some(tx) = self.server_tx.take() { tx.send(()).map_err(|_| Error::GdbServerNotStarted) } else { Ok(()) } } #[allow(unused)] pub async fn notify_gdb_stop(&mut self, stopped: StoppedInferior) -> Result<(), Error> { if let Some(tx) = self.inferior_attached_tx.take() { tx.send(stopped) .await .map_err(|_| Error::GdbServerSendPacketError) } else { Ok(()) } } } struct GdbServerImpl { reader: Box<dyn AsyncRead + Send + Unpin>, pkt_tx: mpsc::Sender<Packet>, server_rx: Option<oneshot::Receiver<()>>, session: Option<Session>, } /// Binds to the given address and waits for an incoming connection. async fn wait_for_tcp_connection(addr: SocketAddr) -> io::Result<TcpStream> { // NOTE: `tokio::net::TcpListener::bind` is not used here on purpose. It // spawns an additional tokio worker thread. We want to avoid an extra // thread here since it could perturb the deterministic allocation of PIDs. // Using `std::net::TcpListener::bind` appears to avoid spawning an extra // tokio worker thread. 
let listener = std::net::TcpListener::bind(addr)?; listener.set_nonblocking(true)?; let listener = TcpListener::from_std(listener)?; let (stream, client_addr) = listener.accept().await?; tracing::info!("Accepting client connection: {:?}", client_addr); Ok(stream) } /// Binds to the given socket path and waits for an incoming connection. async fn wait_for_unix_connection(path: &Path) -> io::Result<UnixStream> { let listener = UnixListener::bind(path)?; let (stream, client_addr) = listener.accept().await?; tracing::info!("Accepting client connection: {:?}", client_addr); Ok(stream) } // NB: during handshake, gdb may send packet prefixed with `+' (Ack), or send // `+' then the actual packet (send two times). Since Ack is also a valid packet // This may cause confusion to Packet::try_from(), since it tries to decode one // packet at a time. enum PacketWithAck { // Just a packet, note `+' only is considered to be `JustPacket'. JustPacket(Packet), // `+' (Ack) followed by a packet, such as `+StartNoAckMode'. WithAck(Packet), } const PACKET_BUFFER_CAPACITY: usize = 0x8000; impl GdbServerImpl { /// Creates a new gdbserver, by accepting remote connection at `addr`. async fn from_addr( addr: SocketAddr, server_rx: oneshot::Receiver<()>, inferior_attached_rx: mpsc::Receiver<StoppedInferior>, ) -> Result<Self, Error> { let stream = wait_for_tcp_connection(addr) .await .map_err(|source| Error::WaitForGdbConnect { source })?; let (reader, writer) = stream.into_split(); let (tx, rx) = mpsc::channel(1); // create a gdb session. let session = Session::new(Box::new(writer), rx, inferior_attached_rx); Ok(GdbServerImpl { reader: Box::new(reader), pkt_tx: tx, server_rx: Some(server_rx), session: Some(session), }) } /// Creates a GDB server and listens on the given unix domain socket. async fn from_path( path: &Path, server_rx: oneshot::Receiver<()>, inferior_attached_rx: mpsc::Receiver<StoppedInferior>, ) -> Result<Self, Error> { let stream = wait_for_unix_connection(path) .await .map_err(|source| Error::WaitForGdbConnect { source })?; let (reader, writer) = stream.into_split(); let (tx, rx) = mpsc::channel(1); // Create a gdb session. let session = Session::new(Box::new(writer), rx, inferior_attached_rx); Ok(GdbServerImpl { reader: Box::new(reader), pkt_tx: tx, server_rx: Some(server_rx), session: Some(session), }) } async fn recv_packet(&mut self) -> Result<PacketWithAck, Error> { let mut rx_buf = BytesMut::with_capacity(PACKET_BUFFER_CAPACITY); self.reader .read_buf(&mut rx_buf) .await .map_err(|_| Error::ConnReset)?; // packet to follow, such as `+StartNoAckMode`. Ok(if rx_buf.starts_with(b"+") && rx_buf.len() > 1 { PacketWithAck::WithAck(Packet::new(rx_buf.split_off(1))?) } else { PacketWithAck::JustPacket(Packet::new(rx_buf.split())?) }) } async fn
(&mut self, packet: Packet) -> Result<(), Error> { self.pkt_tx .send(packet) .await .map_err(|_| Error::GdbServerSendPacketError) } async fn relay_gdb_packets(&mut self) -> Result<(), Error> { while let Ok(pkt) = self.recv_packet().await { match pkt { PacketWithAck::JustPacket(pkt) => { self.send_packet(Packet::Ack).await?; self.send_packet(pkt).await?; } PacketWithAck::WithAck(pkt) => self.send_packet(pkt).await?, } } // remote client closed connection. Ok(()) } /// Run gdbserver. /// /// The gdbserver can run in a separate tokio thread pool. /// /// ```no_compile /// let gdbserver = GdbServer::new(..).await?; /// let handle = tokio::task::spawn(gdbserver.run()); /// // snip /// handle.await?? /// ``` async fn run(mut self) -> Result<(), Error> { // NB: waiting for initial request to start gdb server. This is // required because if gdbserver is started too soon, gdb (client) // could get timeout. Some requests such as `g' needs IPC with a // gdb session, which only becomes ready later. if let Some(server_rx) = self.server_rx.take() { let _ = server_rx.await.map_err(|_| Error::GdbServerNotStarted)?; let mut session = self.session.take().ok_or(Error::SessionNotStarted)?; let run_session = session.run(); let run_loop = self.relay_gdb_packets(); future::try_join(run_session, run_loop).await?; } Ok(()) } }
send_packet
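The recv_packet logic above handles the handshake quirk described in the PacketWithAck comment: a single read from the GDB client may contain a lone `+` (ack), or `+` immediately followed by a packet, and a bare `+` is itself a valid packet. A small Python sketch of that same branching is shown below; the helper name classify is an illustrative assumption, and the example packet is a standard GDB remote-serial-protocol frame.

# Sketch of the ack-handling branch: "+<packet>" is split into an ack plus
# the packet, while a lone "+" (or any other payload) is one packet.
def classify(buf: bytes):
    if buf.startswith(b"+") and len(buf) > 1:
        # Ack followed by a packet: strip the ack, keep the rest.
        return ("with_ack", buf[1:])
    return ("just_packet", buf)

print(classify(b"+"))                     # ('just_packet', b'+')
print(classify(b"+$QStartNoAckMode#b0"))  # ('with_ack', b'$QStartNoAckMode#b0')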
model_trainer_c4d127b7cc8008ff2c0c849733ead6e1.py
""" This script creates an instance of a sacred experiment and defines default configurations for training a neural network or a regression model. """ from src.neural_nets.models import get_model from src.neural_nets.load_data import get_loader from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss import src.regression.logistic_regression as reg import os import numpy as np import torch import torch.nn as nn import torch.optim as optim import torchsso.optim as soptim import torch.nn.functional as F import random from torch.utils.data import DataLoader from sacred import Experiment from torch import Tensor, device from copy import deepcopy from time import sleep from tqdm import tqdm from typing import List from itertools import product # create a new sacred experiment whose name is an integer ex = Experiment(name=str(random.randint(0, 1000000))) # default configurations @ex.config def cfg(): # system cuda = torch.cuda.is_available() gpu = 0 base_dir = os.getcwd() # supported datasets # JSB_Chorales (short) # Nottingham (medium) # Piano_midi (long) # MuseData (extra long) dataset = "JSB_Chorales" # training num_epochs = 150 batch_size = 128 # mask some low notes and some high notes because they never show up low_off_notes = 0 high_off_notes = 88 lr = 0.001 decay = 1.0 optmzr = "SGD" regularization = 0.0 # hyperparameter search do_hpsearch = False learning_rates = 10**np.linspace(-2, -4, 5) decays = 1 - np.linspace(0, 0.1, num=5) regularizations = 10**np.linspace(-2, -4, num=5) hps_epochs = 50 # Supported architectures # REGRESSION # LDS # TANH architecture = 'LDS' readout = 'linear' gradient_clipping = 1 jit = False # not fully implemented # for regression lag = 1 window = 1 # for neural networks input_size = 88 hidden_size = 300 num_layers = 1 output_size = 88 # see models.py and initialization.py for details init = 'default' scale = 1.0 parity = None # see models.py t_distrib = torch.distributions.Uniform(0, 0.75) path = 'results/77/final_state_dict.pt' # when to save state dictionaries save_init_model = True save_final_model = True save_every_epoch = False # detect backprop anomalies detect_anomaly = False # give all random number generators the same seed def _seed_all(_seed) -> None: torch.manual_seed(_seed) np.random.seed(_seed) random.seed(_seed) # this context is used when we are running things on the cpu class
(object): def __init__(self): pass def __enter__(self): pass def __exit__(self, type, value, traceback): pass # this function simply trains regression models and logs the results # see regression.trainer for details @ex.capture def sklearn_experiment(dataset: str, save_dir: str, num_epochs: int, high_off_notes: int, low_off_notes: int, lag: int, window: int, _seed, _log, _run): """ :param dataset: name of the dataset to be used :save_dir: temporary directory where artifacts are being stored :lag: how many time steps into the future the regression model is to predict :window: how many time steps the regression model is to take into account :param _seed: sacred random seed :param _log: sacred object used to output to the command line :param _run: sacred object used to monitor the runtime """ num_notes = high_off_notes - low_off_notes models = reg.train_models(dataset, num_epochs, low_off_notes, high_off_notes, _seed, lag=lag, window=window) coefs = np.zeros((num_notes, num_notes*window)) intercepts = np.zeros(num_notes*window) for i in range(num_notes): model = models[i] # if there were no notes played for this channel, a model won't be trained # simply save all parameters as -1 to discourage the note from being played if model == None: coefs[i] = -1 intercepts[i] = -1 else: coefs[i] = model.coef_ intercepts[i] = model.intercept_ np.save(save_dir + 'coefs.npy', coefs) np.save(save_dir + 'intercepts.npy', intercepts) _run.add_artifact(save_dir + 'coefs.npy') _run.add_artifact(save_dir + 'intercepts.npy') train_loss = reg.compute_loss(models, dataset, 'traindata', low_off_notes, high_off_notes, lag=lag, window=window) test_loss = reg.compute_loss(models, dataset, 'testdata', low_off_notes, high_off_notes, lag=lag, window=window) valid_loss = reg.compute_loss(models, dataset, 'validdata', low_off_notes, high_off_notes, lag=lag, window=window) _run.log_scalar('trainLoss', train_loss) _run.log_scalar('testLoss', test_loss) _run.log_scalar('validLoss', valid_loss) train_acc = reg.compute_accuracy(models, dataset, 'traindata', low_off_notes, high_off_notes, lag=lag, window=window) test_acc = reg.compute_accuracy(models, dataset, 'testdata', low_off_notes, high_off_notes, lag=lag, window=window) valid_acc = reg.compute_accuracy(models, dataset, 'validdata', low_off_notes, high_off_notes, lag=lag, window=window) _run.log_scalar('trainAccuracy', train_acc) _run.log_scalar('testAccuracy', test_acc) _run.log_scalar('validAccuracy', valid_acc) # a single optimization step @ex.capture def train_iter(device: device, cuda_device: torch.cuda.device, input_tensor: Tensor, target: Tensor, mask: Tensor, model: nn.Module, loss_fcn: nn.Module, optimizer: optim.Optimizer, save_every_epoch: bool, save_dir: str, train_loader: DataLoader, test_loader: DataLoader, valid_loader: DataLoader, low_off_notes: int, high_off_notes: int, _log, _run, logging=True): input_tensor = input_tensor.to(device) # number of songs in this batch N = input_tensor.shape[0] output, hidden_tensors = model(input_tensor) loss = loss_fcn(output, target, mask, model)/N optimizer.zero_grad() loss.backward() optimizer.step() # use sacred to log training loss and accuracy if logging: train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes) _run.log_scalar("trainLoss", loss.cpu().detach().item()) _run.log_scalar("trainAccuracy", train_acc) # save a copy of the model and make sacred remember it each epoch if save_every_epoch and logging: sd = deepcopy(model.state_dict()) torch.save(init_sd, save_dir + 'state_dict_' + 
str(epoch) + '.pt') _run.add_artifact(save_dir + 'state_dict_' + str(epoch) + '.pt') # train a neural network # returns the final loss and accuracy on the training, testing, and validation sets @ex.capture def pytorch_train_loop(cuda: bool, model_dict: dict, initializer: dict, train_loader: DataLoader, test_loader: DataLoader, valid_loader: DataLoader, low_off_notes: int, high_off_notes: int, optmzr: str, lr: float, decay: float, regularization: float, num_epochs: int, save_dir: str, save_init_model, save_every_epoch, save_final_model, _seed, _log, _run, logging=True): # construct and initialize the model model = get_model(model_dict, initializer, cuda) # save a copy of the initial model and make sacred remember it if save_init_model and logging: init_sd = deepcopy(model.state_dict()) torch.save(init_sd, save_dir + 'initial_state_dict.pt') _run.add_artifact(save_dir + 'initial_state_dict.pt') # if we are on cuda we construct the device and run everything on it cuda_device = NullContext() device = torch.device('cpu') if cuda: dev_name = 'cuda:' + str(gpu) cuda_device = torch.cuda.device(dev_name) device = torch.device(dev_name) model = model.to(device) with cuda_device: # see metrics.py loss_fcn = MaskedBCE(regularization, low_off_notes=low_off_notes, high_off_notes=high_off_notes) # compute the metrics before training and log them if logging: train_loss = compute_loss(loss_fcn, model, train_loader) test_loss = compute_loss(loss_fcn, model, test_loader) val_loss = compute_loss(loss_fcn, model, valid_loader) _run.log_scalar("trainLoss", train_loss) _run.log_scalar("testLoss", test_loss) _run.log_scalar("validLoss", val_loss) train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes) test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes) val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes) _run.log_scalar("trainAccuracy", train_acc) _run.log_scalar("testAccuracy", test_acc) _run.log_scalar("validAccuracy", val_acc) # construct the optimizer optimizer = None if optmzr == "SGD": optimizer = optim.SGD(model.parameters(), lr=lr) elif optmzr == "Adam": optimizer = optim.Adam(model.parameters(), lr=lr) elif optmzr == "RMSprop": optimizer = optim.RMSprop(model.parameters(), lr=lr) else: raise ValueError("Optimizer {} not recognized.".format(optmzr)) # learning rate decay scheduler = None scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: decay**epoch) # begin training loop for epoch in tqdm(range(num_epochs)): for input_tensor, target, mask in train_loader: train_iter(device, cuda_device, input_tensor, target, mask, model, loss_fcn, optimizer, save_every_epoch, save_dir, train_loader, test_loader, valid_loader, low_off_notes, high_off_notes, _log, _run, logging=logging) # learning rate decay scheduler.step() # use sacred to log testing and validation loss and accuracy if logging: test_loss = compute_loss(loss_fcn, model, test_loader) val_loss = compute_loss(loss_fcn, model, valid_loader) test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes) val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes) _run.log_scalar("testLoss", test_loss) _run.log_scalar("validLoss", val_loss) _run.log_scalar("testAccuracy", test_acc) _run.log_scalar("validAccuracy", val_acc) # save a copy of the trained model and make sacred remember it if save_final_model and logging: fin_sd = deepcopy(model.state_dict()) torch.save(fin_sd, save_dir + 'final_state_dict.pt') 
_run.add_artifact(save_dir + 'final_state_dict.pt') # recompute the metrics so that this function can return them train_loss = compute_loss(loss_fcn, model, train_loader) test_loss = compute_loss(loss_fcn, model, test_loader) val_loss = compute_loss(loss_fcn, model, valid_loader) train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes) test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes) val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes) return ((train_loss, test_loss, val_loss), (train_acc, test_acc, val_acc)) # main function @ex.automain def train_loop(cuda, gpu, base_dir, dataset, num_epochs, batch_size, low_off_notes, high_off_notes, lr, decay, optmzr, regularization, do_hpsearch, learning_rates, decays, regularizations, hps_epochs, architecture, readout, gradient_clipping, jit, lag, window, input_size, hidden_size, num_layers, output_size, detect_anomaly, init, scale, parity, t_distrib, path, save_init_model, save_final_model, save_every_epoch, _seed, _log, _run): # save artifacts to a temporary directory that gets erased when the experiment is over save_dir = base_dir + '/tmp_' + str(_seed) os.system('mkdir ' + save_dir) save_dir += '/' # give all random number generators the same seed _seed_all(_seed) sklearn_program = architecture == 'REGRESSION' # regression models and neural networks are trained very differently if sklearn_program: sklearn_experiment(dataset, save_dir, num_epochs, high_off_notes, low_off_notes, lag, window, _seed, _log, _run) # run a pytorch program else: model_dict = {'architecture': architecture, 'readout': readout, 'gradient_clipping': gradient_clipping, 'jit': jit, 'lag': lag, 'window': window, 'input_size': input_size, 'hidden_size': hidden_size, 'num_layers': num_layers, 'output_size': output_size } initializer = {'init': init, 'scale': scale, 'parity': parity, 't_distrib': t_distrib, 'path': path, 'low_off_notes': low_off_notes, 'high_off_notes': high_off_notes } # if we are debugging we may want to detect autograd anomalies torch.autograd.set_detect_anomaly(detect_anomaly) # construct the pytorch data loaders train_loader, test_loader, valid_loader = get_loader(dataset, batch_size) # standard training loop if not do_hpsearch: # the training loop function returns the metrics achieved at the end of training # they will be logged by default, no need to do anything with them here metrics = pytorch_train_loop(cuda, model_dict, initializer, train_loader, test_loader, valid_loader, low_off_notes, high_off_notes, optmzr, lr, decay, regularization, num_epochs, save_dir, save_init_model, save_every_epoch, save_final_model, _seed, _log, _run) # only goal here is to find the best hyper parameters else: min_test_loss = float('inf') best_lr = 0 best_dcay = 0 best_reg = 0 hyperparams = product(learning_rates, decays, regularizations) for rate, dcay, reg in hyperparams: # train a model with the given hyperparameters # don't log anything, otherwise we will have a ridiculous amount of extraneous info metrics = pytorch_train_loop(cuda, model_dict, initializer, train_loader, test_loader, valid_loader, optmzr, rate, dcay, reg, hps_epochs, save_dir, save_init_model, save_every_epoch, save_final_model, _seed, _log, _run, logging=False) # loss is first index, test set is second index test_loss = metrics[0][1] # compare loss against other hyperparams and update if necessary if test_loss == test_loss and test_loss < min_test_loss: min_test_loss = test_loss best_lr = rate best_dcay = dcay 
best_reg = reg # record the best hyperparameters _run.log_scalar("learning_rate", best_lr) _run.log_scalar("decay", best_dcay) _run.log_scalar("regularization", best_reg) # wait a second then remove the temporary directory used for storing artifacts sleep(1) os.system('rm -r ' + save_dir)
NullContext
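The hyperparameter-search branch above walks the Cartesian product of learning rates, decays, and regularizations, keeps the combination with the lowest test loss, and uses the `test_loss == test_loss` comparison to reject NaN losses (NaN is the only value not equal to itself). A self-contained Python sketch of that pattern follows; train_once and the parameter grids are hypothetical stand-ins for the real pytorch_train_loop call.

# Minimal grid-search sketch with a NaN guard.
import math
from itertools import product

def train_once(lr, decay, reg):
    # Hypothetical stand-in returning a test loss for one configuration.
    return (lr - 0.001) ** 2 + (decay - 0.95) ** 2 + reg

learning_rates = [1e-2, 1e-3, 1e-4]
decays = [1.0, 0.95, 0.9]
regularizations = [1e-2, 1e-3]

best_loss, best_params = math.inf, None
for lr, decay, reg in product(learning_rates, decays, regularizations):
    loss = train_once(lr, decay, reg)
    if loss == loss and loss < best_loss:  # 'loss == loss' is False only for NaN
        best_loss, best_params = loss, (lr, decay, reg)

print("best loss:", best_loss, "with (lr, decay, reg) =", best_params)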
mixer.rs
//! A mixer processing node. //! //! A mixer can have multiple consumer slots, which will be routed //! through `compositor` and `audiomixer` elements. use actix::prelude::*; use anyhow::{anyhow, Error}; use gst::prelude::*; use gst_base::prelude::*; use std::collections::HashMap; use std::sync::{Arc, Mutex}; use tracing::{debug, error, instrument, trace}; use auteur_controlling::controller::{ MixerCommand, MixerConfig, MixerInfo, MixerSlotInfo, NodeInfo, State, }; use crate::node::{ ConsumerMessage, GetNodeInfoMessage, GetProducerMessage, MixerCommandMessage, NodeManager, NodeStatusMessage, ScheduleMessage, StartMessage, StopMessage, StoppedMessage, }; use crate::utils::{ make_element, ErrorMessage, PipelineManager, Schedulable, StateChangeResult, StateMachine, StopManagerMessage, StreamProducer, }; const DEFAULT_FALLBACK_TIMEOUT: u32 = 500; /// Represents a connection to a producer struct ConsumerSlot { /// Video producer video_producer: StreamProducer, /// Audio producer audio_producer: StreamProducer, /// Video input to `compositor` video_appsrc: gst_app::AppSrc, /// Audio input to `audiomixer` audio_appsrc: gst_app::AppSrc, /// Processing elements before `compositor` video_bin: Option<gst::Bin>, /// Processing elements before `audiomixer` audio_bin: Option<gst::Bin>, /// Volume of the `audiomixer` pad volume: f64, /// Used to reconfigure the geometry of the input video stream video_capsfilter: Option<gst::Element>, } /// Used from our `compositor::samples_selected` callback #[derive(Debug)] pub struct MixingState { /// For how long no pad other than our base plate has selected samples base_plate_timeout: gst::ClockTime, /// Whether our base plate is opaque showing_base_plate: bool, } /// The Mixer actor pub struct Mixer { /// Unique identifier id: String, /// The wrapped pipeline pipeline: gst::Pipeline, /// A helper for managing the pipeline pipeline_manager: Option<Addr<PipelineManager>>, /// Output video producer video_producer: StreamProducer, /// Output audio producer audio_producer: StreamProducer, /// Input connection points consumer_slots: HashMap<String, ConsumerSlot>, /// `compositor` audio_mixer: Option<gst::Element>, /// `audiomixer` video_mixer: Option<gst::Element>, /// Mixing geometry and format config: MixerConfig, /// Used for showing and hiding the base plate mixing_state: Arc<Mutex<MixingState>>, /// Optional timeout for showing the base plate fallback_timeout: gst::ClockTime, /// For controlling the output sample rate audio_capsfilter: Option<gst::Element>, /// For resizing the base plate base_plate_capsfilter: Option<gst::Element>, /// For resizing our output video stream video_capsfilter: Option<gst::Element>, /// Our state machine state_machine: StateMachine, } impl Actor for Mixer { type Context = Context<Self>; #[instrument(level = "debug", name = "starting", skip(self, ctx), fields(id = %self.id))] fn started(&mut self, ctx: &mut Self::Context) { self.pipeline_manager = Some( PipelineManager::new( self.pipeline.clone(), ctx.address().downgrade().recipient(), &self.id, ) .start(), ); } #[instrument(level = "debug", name = "stopped", skip(self, _ctx), fields(id = %self.id))] fn stopped(&mut self, _ctx: &mut Self::Context) { if let Some(manager) = self.pipeline_manager.take() { let _ = manager.do_send(StopManagerMessage); } for (id, slot) in self.consumer_slots.drain() { slot.video_producer.remove_consumer(&id); slot.audio_producer.remove_consumer(&id); } NodeManager::from_registry().do_send(StoppedMessage { id: self.id.clone(), video_producer: 
Some(self.video_producer.clone()), audio_producer: Some(self.video_producer.clone()), }); } } impl Mixer { /// Create a mixer pub fn new(id: &str, config: MixerConfig) -> Self { let pipeline = gst::Pipeline::new(None); let audio_appsink = gst::ElementFactory::make("appsink", Some(&format!("mixer-audio-appsink-{}", id))) .unwrap() .downcast::<gst_app::AppSink>() .unwrap(); let video_appsink = gst::ElementFactory::make("appsink", Some(&format!("mixer-video-appsink-{}", id))) .unwrap() .downcast::<gst_app::AppSink>() .unwrap(); pipeline .add_many(&[&audio_appsink, &video_appsink]) .unwrap(); let fallback_timeout = config.fallback_timeout.unwrap_or(DEFAULT_FALLBACK_TIMEOUT); Self { id: id.to_string(), pipeline, pipeline_manager: None, audio_producer: StreamProducer::from(&audio_appsink), video_producer: StreamProducer::from(&video_appsink), consumer_slots: HashMap::new(), audio_mixer: None, video_mixer: None, config, mixing_state: Arc::new(Mutex::new(MixingState { base_plate_timeout: gst::CLOCK_TIME_NONE, showing_base_plate: true, })), fallback_timeout: fallback_timeout as u64 * gst::MSECOND, audio_capsfilter: None, video_capsfilter: None, base_plate_capsfilter: None, state_machine: StateMachine::default(), } } /// Connect an input slot to `compositor` and `audiomixer` #[instrument( level = "debug", name = "connecting", skip(pipeline, slot, vmixer, amixer) )] fn connect_slot( pipeline: &gst::Pipeline, slot: &mut ConsumerSlot, vmixer: &gst::Element, amixer: &gst::Element, mixer_id: &str, id: &str, config: &MixerConfig, ) -> Result<(), Error> { let video_bin = gst::Bin::new(None); let audio_bin = gst::Bin::new(None); let aconv = make_element("audioconvert", None)?; let aresample = make_element("audioresample", None)?; let acapsfilter = make_element("capsfilter", None)?; let aqueue = make_element("queue", None)?; let vqueue = make_element("queue", None)?; // FIXME: https://gitlab.freedesktop.org/gstreamer/gst-plugins-base/-/merge_requests/1156 let vconv = make_element("videoconvert", None)?; let vscale = make_element("videoscale", None)?; let vcapsfilter = make_element("capsfilter", None)?; vcapsfilter .set_property( "caps", &gst::Caps::builder("video/x-raw") .field("width", &config.width) .field("height", &config.height) .field("pixel-aspect-ratio", &gst::Fraction::new(1, 1)) .build(), ) .unwrap(); acapsfilter .set_property( "caps", &gst::Caps::builder("audio/x-raw") .field("channels", &2) .field("format", &"S16LE") .field("rate", &96000) .build(), ) .unwrap(); let vappsrc_elem: &gst::Element = slot.video_appsrc.upcast_ref(); let aappsrc_elem: &gst::Element = slot.audio_appsrc.upcast_ref(); video_bin.add_many(&[vappsrc_elem, &vconv, &vscale, &vcapsfilter, &vqueue])?; audio_bin.add_many(&[aappsrc_elem, &aconv, &aresample, &acapsfilter, &aqueue])?; pipeline.add_many(&[&video_bin, &audio_bin])?; video_bin.sync_state_with_parent()?; audio_bin.sync_state_with_parent()?; let ghost = gst::GhostPad::with_target(Some("src"), &vqueue.static_pad("src").unwrap()).unwrap(); video_bin.add_pad(&ghost).unwrap(); let ghost = gst::GhostPad::with_target(Some("src"), &aqueue.static_pad("src").unwrap()).unwrap(); audio_bin.add_pad(&ghost).unwrap(); let amixer_pad = amixer.request_pad_simple("sink_%u").unwrap(); let vmixer_pad = vmixer.request_pad_simple("sink_%u").unwrap(); amixer_pad.set_property("volume", &slot.volume).unwrap(); gst::Element::link_many(&[aappsrc_elem, &aconv, &aresample, &acapsfilter, &aqueue])?; gst::Element::link_many(&[vappsrc_elem, &vconv, &vscale, &vcapsfilter, &vqueue])?; let srcpad = 
audio_bin.static_pad("src").unwrap(); srcpad.link(&amixer_pad).unwrap(); let srcpad = video_bin.static_pad("src").unwrap(); srcpad.link(&vmixer_pad).unwrap(); slot.audio_bin = Some(audio_bin); slot.video_bin = Some(video_bin); slot.video_capsfilter = Some(vcapsfilter); slot.video_producer.add_consumer(&slot.video_appsrc, id); slot.audio_producer.add_consumer(&slot.audio_appsrc, id); Ok(()) } /// Build the base plate. It may be either a live videotestsrc, or an /// imagefreeze'd image when a fallback image was specified #[instrument(level = "debug", name = "building base plate", skip(self), fields(id = %self.id))] fn build_base_plate(&mut self) -> Result<gst::Element, Error> { let bin = gst::Bin::new(None); let ghost = match self.config.fallback_image.as_ref() { Some(path) => { let filesrc = make_element("filesrc", None)?; let decodebin = make_element("decodebin3", None)?; let vconv = make_element("videoconvert", None)?; let imagefreeze = make_element("imagefreeze", None)?; /* We have to rescale after imagefreeze for now, as we might * need to update the resolution dynamically */ let vscale = make_element("videoscale", None)?; let capsfilter = make_element("capsfilter", None)?; filesrc.set_property("location", path).unwrap(); imagefreeze.set_property("is-live", &true).unwrap(); capsfilter .set_property( "caps", &gst::Caps::builder("video/x-raw") .field("width", &self.config.width) .field("height", &self.config.height) .field("pixel-aspect-ratio", &gst::Fraction::new(1, 1)) .build(), ) .unwrap(); bin.add_many(&[ &filesrc, &decodebin, &imagefreeze, &vconv, &vscale, &capsfilter, ])?; let imagefreeze_clone = imagefreeze.downgrade(); decodebin.connect_pad_added(move |_bin, pad| { if let Some(imagefreeze) = imagefreeze_clone.upgrade() { let sinkpad = imagefreeze.static_pad("sink").unwrap(); pad.link(&sinkpad).unwrap(); } }); filesrc.link(&decodebin)?; gst::Element::link_many(&[&imagefreeze, &vconv, &vscale, &capsfilter])?; self.base_plate_capsfilter = Some(capsfilter.clone()); gst::GhostPad::with_target(Some("src"), &capsfilter.static_pad("src").unwrap()) .unwrap() } None => { let vsrc = make_element("videotestsrc", None)?; vsrc.set_property("is-live", &true).unwrap(); vsrc.set_property_from_str("pattern", "black"); bin.add(&vsrc)?; gst::GhostPad::with_target(Some("src"), &vsrc.static_pad("src").unwrap()).unwrap() } }; bin.add_pad(&ghost).unwrap(); Ok(bin.upcast()) } /// Show or hide our base plate. 
Will be used in the future for interpolating /// properties of mixer pads #[instrument(name = "Updating mixing state", level = "trace")] fn update_mixing_state( agg: &gst_base::Aggregator, id: &str, pts: gst::ClockTime, mixing_state: &mut MixingState, timeout: gst::ClockTime, ) { let mut base_plate_only = true; let base_plate_pad = agg.static_pad("sink_0").unwrap(); for pad in agg.sink_pads() { if pad == base_plate_pad { continue; } let agg_pad: &gst_base::AggregatorPad = pad.downcast_ref().unwrap(); if let Some(sample) = agg.peek_next_sample(agg_pad) { trace!(pad = %pad.name(), "selected non-base plate sample {:?}", sample); base_plate_only = false; break; } } if base_plate_only { if mixing_state.base_plate_timeout.is_none() { mixing_state.base_plate_timeout = pts; } else if !mixing_state.showing_base_plate && pts - mixing_state.base_plate_timeout > timeout { debug!("falling back to base plate {:?}", base_plate_pad); base_plate_pad.set_property("alpha", &1.0f64).unwrap(); mixing_state.showing_base_plate = true; } } else { if mixing_state.showing_base_plate { debug!("hiding base plate: {:?}", base_plate_pad); base_plate_pad.set_property("alpha", &0.0f64).unwrap(); mixing_state.showing_base_plate = false; } mixing_state.base_plate_timeout = gst::CLOCK_TIME_NONE; } } /// Start our pipeline when cue_time is reached #[instrument(level = "debug", name = "mixing", skip(self, ctx), fields(id = %self.id))] fn start_pipeline(&mut self, ctx: &mut Context<Self>) -> Result<StateChangeResult, Error> { let vsrc = self.build_base_plate()?; let vqueue = make_element("queue", None)?; let vmixer = make_element("compositor", Some("compositor"))?; let vcapsfilter = make_element("capsfilter", None)?; let asrc = make_element("audiotestsrc", None)?; let asrccapsfilter = make_element("capsfilter", None)?; let aqueue = make_element("queue", None)?; let amixer = make_element("audiomixer", Some("audiomixer"))?; let acapsfilter = make_element("capsfilter", None)?; let level = make_element("level", None)?; let aresample = make_element("audioresample", None)?; let aresamplecapsfilter = make_element("capsfilter", None)?; vmixer.set_property_from_str("background", "black"); vmixer .set_property( "start-time-selection", &gst_base::AggregatorStartTimeSelection::First, ) .unwrap(); debug!("stream config: {:?}", self.config); vcapsfilter .set_property( "caps", &gst::Caps::builder("video/x-raw") .field("width", &self.config.width) .field("height", &self.config.height) .field("framerate", &gst::Fraction::new(30, 1)) .field("pixel-aspect-ratio", &gst::Fraction::new(1, 1)) .field("format", &"I420") .field("colorimetry", &"bt601") .field("chroma-site", &"jpeg") .field("interlace-mode", &"progressive") .build(), ) .unwrap(); asrc.set_property("is-live", &true).unwrap(); asrc.set_property("volume", &0.).unwrap(); amixer .set_property( "start-time-selection", &gst_base::AggregatorStartTimeSelection::First, ) .unwrap(); // FIXME: audiomixer doesn't deal very well with audio rate changes, // for now the solution is to simply pick a very high sample rate // (96000 was picked because it is the maximum rate faac supports), // and never change that fixed rate in the mixer, simply modulating // it downstream according to what the application requires. 
// // Alternatively, we could avoid exposing that config switch, and // always output 48000, which should be more than enough for anyone asrccapsfilter .set_property( "caps", &gst::Caps::builder("audio/x-raw") .field("channels", &2) .field("format", &"S16LE") .field("rate", &96000) .build(), ) .unwrap(); acapsfilter .set_property( "caps", &gst::Caps::builder("audio/x-raw") .field("channels", &2) .field("format", &"S16LE") .field("rate", &96000) .build(), ) .unwrap(); aresamplecapsfilter .set_property( "caps", &gst::Caps::builder("audio/x-raw") .field("rate", &self.config.sample_rate) .build(), ) .unwrap(); self.pipeline.add_many(&[ &vsrc, &vqueue, &vmixer, &vcapsfilter, &asrc, &asrccapsfilter, &aqueue, &amixer, &acapsfilter, &level, &aresample, &aresamplecapsfilter, ])?; gst::Element::link_many(&[ &vsrc, &vqueue, &vmixer, &vcapsfilter, self.video_producer.appsink().upcast_ref(), ])?; gst::Element::link_many(&[ &asrc, &asrccapsfilter, &aqueue, &amixer, &acapsfilter, &level, &aresample, &aresamplecapsfilter, self.audio_producer.appsink().upcast_ref(), ])?; for (id, slot) in self.consumer_slots.iter_mut() { Mixer::connect_slot( &self.pipeline, slot, &vmixer, &amixer, &self.id, id, &self.config, )?; } let mixing_state = self.mixing_state.clone(); let id = self.id.clone(); let timeout = self.fallback_timeout; vmixer.set_property("emit-signals", &true).unwrap(); vmixer .downcast_ref::<gst_base::Aggregator>() .unwrap() .connect_samples_selected( move |agg: &gst_base::Aggregator, _segment, pts, _dts, _duration, _info| { let mut mixing_state = mixing_state.lock().unwrap(); Mixer::update_mixing_state(agg, &id, pts, &mut *mixing_state, timeout); }, ); self.video_mixer = Some(vmixer); self.audio_mixer = Some(amixer); self.video_capsfilter = Some(vcapsfilter); self.audio_capsfilter = Some(aresamplecapsfilter); let addr = ctx.address(); let id = self.id.clone(); self.pipeline.call_async(move |pipeline| { if let Err(err) = pipeline.set_state(gst::State::Playing) { let _ = addr.do_send(ErrorMessage(format!( "Failed to start mixer {}: {}", id, err ))); } }); self.video_producer.forward(); self.audio_producer.forward(); Ok(StateChangeResult::Success) } #[instrument(level = "debug", name = "updating slot volume", skip(self), fields(id = %self.id))] fn set_slot_volume(&mut self, slot_id: &str, volume: f64) -> Result<(), Error> { if !(0. 
..=10.).contains(&volume) { return Err(anyhow!("invalid slot volume: {}", volume)); } if let Some(mut slot) = self.consumer_slots.get_mut(slot_id) { slot.volume = volume; if let Some(ref audio_bin) = slot.audio_bin { let mixer_pad = audio_bin.static_pad("src").unwrap().peer().unwrap(); mixer_pad.set_property("volume", &volume).unwrap(); } Ok(()) } else { Err(anyhow!("mixer {} has no slot with id {}", self.id, slot_id)) } } /// Implement UpdateConfig #[instrument(level = "debug", name = "updating config", skip(self), fields(id = %self.id))] fn update_config( &mut self, width: Option<i32>, height: Option<i32>, sample_rate: Option<i32>, ) -> Result<(), Error> { if let Some(width) = width { self.config.width = width; } if let Some(height) = height { self.config.height = height; } if let Some(sample_rate) = sample_rate { self.config.sample_rate = sample_rate; } // FIXME: do this atomically from selected_samples for tear-free transition // once https://gitlab.freedesktop.org/gstreamer/gst-plugins-base/-/merge_requests/1156 is // in if let Some(capsfilter) = &self.video_capsfilter { debug!("updating output resolution"); capsfilter .set_property( "caps", &gst::Caps::builder("video/x-raw") .field("width", &self.config.width) .field("height", &self.config.height) .field("framerate", &gst::Fraction::new(30, 1)) .field("pixel-aspect-ratio", &gst::Fraction::new(1, 1)) .field("format", &"I420") .field("colorimetry", &"bt601") .field("chroma-site", &"jpeg") .field("interlace-mode", &"progressive") .build(), ) .unwrap(); } if let Some(capsfilter) = &self.base_plate_capsfilter { debug!("updating fallback image resolution"); capsfilter .set_property( "caps", &gst::Caps::builder("video/x-raw") .field("width", &self.config.width) .field("height", &self.config.height) .field("pixel-aspect-ratio", &gst::Fraction::new(1, 1)) .build(), ) .unwrap(); } for (slot_id, slot) in &self.consumer_slots { if let Some(ref capsfilter) = slot.video_capsfilter { debug!(slot_id = %slot_id,"updating mixer slot resolution"); capsfilter .set_property( "caps", &gst::Caps::builder("video/x-raw") .field("width", &self.config.width) .field("height", &self.config.height) .field("pixel-aspect-ratio", &gst::Fraction::new(1, 1)) .build(), ) .unwrap(); } } if let Some(capsfilter) = &self.audio_capsfilter { debug!("Updating output audio rate"); capsfilter .set_property( "caps", &gst::Caps::builder("audio/x-raw") .field("rate", &self.config.sample_rate) .build(), ) .unwrap(); } Ok(()) } /// Implement Connect command #[instrument(level = "debug", name = "connecting", skip(self, video_producer, audio_producer), fields(id = %self.id))] fn connect( &mut self, link_id: &str, video_producer: &StreamProducer, audio_producer: &StreamProducer, ) -> Result<(), Error> { if self.consumer_slots.contains_key(link_id) { return Err(anyhow!("mixer {} already has link {}", self.id, link_id)); } let video_appsrc = gst::ElementFactory::make( "appsrc", Some(&format!("mixer-slot-video-appsrc-{}", link_id)), ) .unwrap() .downcast::<gst_app::AppSrc>() .unwrap(); let audio_appsrc = gst::ElementFactory::make( "appsrc", Some(&format!("mixer-slot-audio-appsrc-{}", link_id)), ) .unwrap() .downcast::<gst_app::AppSrc>() .unwrap(); for appsrc in &[&video_appsrc, &audio_appsrc] { appsrc.set_format(gst::Format::Time); appsrc.set_is_live(true); appsrc.set_handle_segment_change(true); } let mut slot = ConsumerSlot { video_producer: video_producer.clone(), audio_producer: audio_producer.clone(), video_appsrc, audio_appsrc, audio_bin: None, video_bin: None, volume: 1.0, 
video_capsfilter: None, }; if self.state_machine.state == State::Started { let vmixer = self.video_mixer.clone().unwrap(); let amixer = self.audio_mixer.clone().unwrap(); if let Err(err) = Mixer::connect_slot( &self.pipeline, &mut slot, &vmixer, &amixer, &self.id, link_id, &self.config, ) { return Err(err); } } self.consumer_slots.insert(link_id.to_string(), slot); Ok(()) } /// Implement Disconnect command #[instrument(level = "debug", name = "disconnecting", skip(self), fields(id = %self.id))] fn disconnect(&mut self, slot_id: &str) -> Result<(), Error> { if let Some(slot) = self.consumer_slots.remove(slot_id) { slot.video_producer.remove_consumer(slot_id); slot.audio_producer.remove_consumer(slot_id); if let Some(video_bin) = slot.video_bin { let mixer_pad = video_bin.static_pad("src").unwrap().peer().unwrap(); video_bin.set_locked_state(true); video_bin.set_state(gst::State::Null).unwrap(); self.pipeline.remove(&video_bin).unwrap(); self.video_mixer .clone() .unwrap() .release_request_pad(&mixer_pad); } if let Some(audio_bin) = slot.audio_bin { let mixer_pad = audio_bin.static_pad("src").unwrap().peer().unwrap(); audio_bin.set_locked_state(true); audio_bin.set_state(gst::State::Null).unwrap(); self.pipeline.remove(&audio_bin).unwrap(); self.audio_mixer .clone() .unwrap() .release_request_pad(&mixer_pad); } Ok(()) } else { Err(anyhow!("mixer {} has no slot with id {}", self.id, slot_id)) } } #[instrument(level = "debug", skip(self, ctx), fields(id = %self.id))] fn stop(&mut self, ctx: &mut Context<Self>) { self.stop_schedule(ctx); ctx.stop(); } } impl Schedulable<Self> for Mixer { fn state_machine(&self) -> &StateMachine { &self.state_machine } fn state_machine_mut(&mut self) -> &mut StateMachine { &mut self.state_machine } fn node_id(&self) -> &str { &self.id } #[instrument(level = "debug", skip(self, ctx), fields(id = %self.id))] fn transition( &mut self, ctx: &mut Context<Self>, target: State, ) -> Result<StateChangeResult, Error> { match target { State::Initial => Ok(StateChangeResult::Skip), State::Starting => self.start_pipeline(ctx), State::Started => Ok(StateChangeResult::Success), State::Stopping => Ok(StateChangeResult::Skip), State::Stopped => { self.stop(ctx); Ok(StateChangeResult::Success) } } } } impl Handler<ConsumerMessage> for Mixer { type Result = MessageResult<ConsumerMessage>; fn handle(&mut self, msg: ConsumerMessage, _ctx: &mut Context<Self>) -> Self::Result { match msg { ConsumerMessage::Connect { link_id, video_producer, audio_producer, } => MessageResult(self.connect(&link_id, &video_producer, &audio_producer)), ConsumerMessage::Disconnect { slot_id } => MessageResult(self.disconnect(&slot_id)), } } } impl Handler<StartMessage> for Mixer { type Result = MessageResult<StartMessage>; fn handle(&mut self, msg: StartMessage, ctx: &mut Context<Self>) -> Self::Result { MessageResult(self.start_schedule(ctx, msg.cue_time, msg.end_time)) } } impl Handler<MixerCommandMessage> for Mixer { type Result = MessageResult<MixerCommandMessage>; fn handle(&mut self, msg: MixerCommandMessage, _ctx: &mut Context<Self>) -> Self::Result { match msg.command { MixerCommand::UpdateConfig { width, height, sample_rate, } => MessageResult(self.update_config(width, height, sample_rate)), MixerCommand::SetSlotVolume { slot_id, volume } => { MessageResult(self.set_slot_volume(&slot_id, volume)) } } } } impl Handler<ErrorMessage> for Mixer { type Result = (); fn handle(&mut self, msg: ErrorMessage, ctx: &mut Context<Self>) -> Self::Result { error!("Got error message '{}' on destination {}", 
msg.0, self.id,); NodeManager::from_registry().do_send(NodeStatusMessage::Error { id: self.id.clone(), message: msg.0, }); gst::debug_bin_to_dot_file_with_ts( &self.pipeline, gst::DebugGraphDetails::all(), format!("error-mixer-{}", self.id), ); ctx.stop(); } } impl Handler<GetProducerMessage> for Mixer { type Result = MessageResult<GetProducerMessage>; fn
(&mut self, _msg: GetProducerMessage, _ctx: &mut Context<Self>) -> Self::Result { MessageResult(Ok(( self.video_producer.clone(), self.audio_producer.clone(), ))) } } impl Handler<ScheduleMessage> for Mixer { type Result = Result<(), Error>; fn handle(&mut self, msg: ScheduleMessage, ctx: &mut Context<Self>) -> Self::Result { self.reschedule(ctx, msg.cue_time, msg.end_time) } } impl Handler<StopMessage> for Mixer { type Result = Result<(), Error>; fn handle(&mut self, _msg: StopMessage, ctx: &mut Context<Self>) -> Self::Result { ctx.stop(); Ok(()) } } impl Handler<GetNodeInfoMessage> for Mixer { type Result = Result<NodeInfo, Error>; fn handle(&mut self, _msg: GetNodeInfoMessage, _ctx: &mut Context<Self>) -> Self::Result { Ok(NodeInfo::Mixer(MixerInfo { width: self.config.width, height: self.config.height, sample_rate: self.config.sample_rate, slots: self .consumer_slots .iter() .map(|(id, slot)| { ( id.clone(), MixerSlotInfo { volume: slot.volume, }, ) }) .collect(), consumer_slot_ids: self.video_producer.get_consumer_ids(), cue_time: self.state_machine.cue_time, end_time: self.state_machine.end_time, state: self.state_machine.state, })) } }
handle
main.rs
// Evaluates a polynomial at a given point by Horner's rule.
// Input: An array p[0..n] of coefficients of a polynomial of degree n,
//        stored from the lowest to the highest, and a number x.
// Output: The value of the polynomial at x.
// Note: assumes p is non-empty; an empty slice would panic on p[n - 1].
fn horner(p: &[i8], x: i8) -> i16 {
    let n = p.len();
    let mut pv: i16 = p[n - 1] as i16;
    for i in 0..(n - 1) {
        pv = (x as i16 * pv) + p[n - 2 - i] as i16;
    }
    pv
}

fn main() {
    // evaluate example polynomial from book
    let p = [-5, 1, 3, -1, 2];
    println!(
        "Evaluation of 2x^4 - x^3 + 3x^2 + x - 5 at x = 3: {}",
        horner(&p, 3)
    );
}
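As a quick sanity check, here is a minimal test sketch (not part of the original file) exercising horner on the book example; by Horner's rule, 2x^4 - x^3 + 3x^2 + x - 5 at x = 3 evaluates to 160.

#[cfg(test)]
mod tests {
    use super::horner;

    #[test]
    fn evaluates_book_example() {
        // (((2*3 - 1)*3 + 3)*3 + 1)*3 - 5 = 160
        assert_eq!(horner(&[-5, 1, 3, -1, 2], 3), 160);
    }
}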
job48.py
""" #Trains a ResNet on the CIFAR10 dataset. """ from __future__ import print_function import keras from keras.layers import Dense, Conv2D, BatchNormalization, Activation from keras.layers import AveragePooling2D, Input, Flatten from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, LearningRateScheduler from keras.callbacks import ReduceLROnPlateau, TensorBoard from keras.preprocessing.image import ImageDataGenerator from keras.regularizers import l2 from keras import backend as K from keras.models import Model from keras.datasets import cifar10 from keras.applications.resnet import ResNet50, ResNet101, ResNet152 from keras import models, layers, optimizers from datetime import datetime import tensorflow as tf import numpy as np import os import pdb import sys import argparse import time import signal import glob import json import send_signal parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training') parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name') parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint') parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use') parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)') parser.set_defaults(resume=False) args = parser.parse_args() os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num # Training parameters batch_size = 32 args_lr = 0.0014 args_model = 'resnet101' epoch_begin_time = 0 job_name = sys.argv[0].split('.')[0] save_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*' total_epochs = 134 starting_epoch = 0 # first step is to update the PID pid = os.getpid() message = job_name + ' pid ' + str(pid) # 'job50 pid 3333' send_signal.send(args.node, 10002, message) if args.resume: save_file = glob.glob(save_files)[0] # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0]) starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1]) data_augmentation = True num_classes = 10 # Subtracting pixel mean improves accuracy subtract_pixel_mean = True n = 3 # Model name, depth and version model_type = args.tc #'P100_resnet50_he_256_1' # Load the CIFAR10 data. (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Normalize data. x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 # If subtract pixel mean is enabled if subtract_pixel_mean: x_train_mean = np.mean(x_train, axis=0) x_train -= x_train_mean x_test -= x_train_mean print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') print('y_train shape:', y_train.shape) # Convert class vectors to binary class matrices. 
y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) if args.resume: print('resume from checkpoint') message = job_name + ' b_end' send_signal.send(args.node, 10002, message) model = keras.models.load_model(save_file) message = job_name + ' c_end' send_signal.send(args.node, 10002, message) else: print('train from start') model = models.Sequential() if '50' in args_model: base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) elif '101' in args_model: base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) elif '152' in args_model: base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) #base_model.summary() #pdb.set_trace() #model.add(layers.UpSampling2D((2,2))) #model.add(layers.UpSampling2D((2,2))) #model.add(layers.UpSampling2D((2,2))) model.add(base_model) model.add(layers.Flatten()) #model.add(layers.BatchNormalization()) #model.add(layers.Dense(128, activation='relu')) #model.add(layers.Dropout(0.5)) #model.add(layers.BatchNormalization()) #model.add(layers.Dense(64, activation='relu')) #model.add(layers.Dropout(0.5)) #model.add(layers.BatchNormalization()) model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform')) model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=args_lr), metrics=['accuracy']) #model.summary() print(model_type) #pdb.set_trace() current_epoch = 0 ################### connects interrupt signal to the process ##################### def terminateProcess(signalNumber, frame): # first record the wasted epoch time
signal.signal(signal.SIGTERM, terminateProcess) ################################################################################# logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch') first_epoch_start = 0 class PrintEpoch(keras.callbacks.Callback): def on_epoch_begin(self, epoch, logs=None): global current_epoch, first_epoch_start #remaining_epochs = epochs - epoch current_epoch = epoch print('current epoch ' + str(current_epoch)) global epoch_begin_time epoch_begin_time = time.time() if epoch == starting_epoch and args.resume: first_epoch_start = time.time() message = job_name + ' d_end' send_signal.send(args.node, 10002, message) elif epoch == starting_epoch: first_epoch_start = time.time() if epoch == starting_epoch: # send signal to indicate checkpoint is qualified message = job_name + ' ckpt_qual' send_signal.send(args.node, 10002, message) def on_epoch_end(self, epoch, logs=None): if epoch == starting_epoch: first_epoch_time = int(time.time() - first_epoch_start) message = job_name + ' 1st_epoch ' + str(first_epoch_time) send_signal.send(args.node, 10002, message) progress = round((epoch+1) / round(total_epochs/2), 2) message = job_name + ' completion ' + str(progress) send_signal.send(args.node, 10002, message) my_callback = PrintEpoch() callbacks = [tensorboard_callback, my_callback] #[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback] # Run training model.fit(x_train, y_train, batch_size=batch_size, epochs=round(total_epochs/2), validation_data=(x_test, y_test), shuffle=True, callbacks=callbacks, initial_epoch=starting_epoch, verbose=1 ) # Score trained model. scores = model.evaluate(x_test, y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) # send signal to indicate job has finished message = job_name + ' finish' send_signal.send(args.node, 10002, message)
global epoch_begin_time if epoch_begin_time == 0: epoch_waste_time = 0 else: epoch_waste_time = int(time.time() - epoch_begin_time) message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100' if epoch_waste_time > 0: send_signal.send(args.node, 10002, message) print('checkpointing the model triggered by kill -15 signal') # delete whatever checkpoint that already exists for f in glob.glob(save_files): os.remove(f) model.save('/scratch/li.baol/checkpoint_final4/' + job_name + '_' + str(current_epoch) + '.h5') print ('(SIGTERM) terminating the process') message = job_name + ' checkpoint' send_signal.send(args.node, 10002, message) sys.exit()
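The SIGTERM handler above reports the wasted epoch time, replaces any existing checkpoint, saves the model, and exits when the scheduler kills the job with kill -15. A stripped-down, standalone sketch of the same signal-driven checkpoint pattern (all names below are illustrative, not taken from the script) could look like this:

import signal
import sys
import time

start_time = time.time()

def save_checkpoint():
    # Placeholder for model.save(...) or similar persistence logic.
    print("checkpoint saved after %.1f s" % (time.time() - start_time))

def terminate(signum, frame):
    # Persist state first, then terminate cleanly.
    save_checkpoint()
    sys.exit(0)

# The scheduler sends SIGTERM; the handler checkpoints before exiting.
signal.signal(signal.SIGTERM, terminate)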
image.rs
use std::hash::{Hash, Hasher}; use std::collections::HashMap; use std::collections::HashSet; use std::fmt; use super::version; #[cfg(test)] use super::image; pub struct ImageEntry { pub id: String, pub ver: version::Version, } impl PartialEq for ImageEntry { fn eq(&self, other: &Self) -> bool
} impl Eq for ImageEntry {} impl Hash for ImageEntry { fn hash<H:Hasher>(&self, state: &mut H) { self.ver.hash(state); } } impl fmt::Debug for ImageEntry { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "ImageEntry [id: {}, ver: {}]", self.id, self.ver) } } pub struct Images { // key: repository pub entries: HashMap<String, HashSet<ImageEntry>> } impl Images { pub fn delete<F>(&self, canonical_keep_count: usize, snapshot_keep_count: usize, mut del: F) -> () where F : FnMut(&str, &version::Version) -> () // repo, version { for (repo, entry) in &self.entries { let mut sum_canonical: HashMap<&Option<String>, Vec<&version::Version>> = HashMap::new(); let mut sum_snapshot: HashMap<&Option<String>, Vec<&version::Version>> = HashMap::new(); for e in entry { let sum = if e.ver.is_snapshot { &mut sum_snapshot } else { &mut sum_canonical }; let keep_count = if e.ver.is_snapshot { snapshot_keep_count } else {canonical_keep_count }; let tbl = sum.entry(&e.ver.branch).or_insert_with(|| Vec::new()); match tbl.binary_search(&&e.ver) { Ok(_idx) => { }, Err(idx) => { tbl.insert(idx, &e.ver); } } if keep_count < tbl.len() { let v = tbl.remove(0); del(repo, v); } } } } } #[test] fn delete_test() { let parser = version::parser(); let mut map: HashMap<String, HashSet<image::ImageEntry>> = HashMap::new(); let mut entries0 = HashSet::new(); entries0.insert(ImageEntry { id: "id00".to_string(), ver: parser.parse("1.0").unwrap() }); entries0.insert(ImageEntry { id: "id01".to_string(), ver: parser.parse("1.1").unwrap() }); entries0.insert(ImageEntry { id: "id02".to_string(), ver: parser.parse("1.10").unwrap() }); entries0.insert(ImageEntry { id: "id03".to_string(), ver: parser.parse("1.2").unwrap() }); entries0.insert(ImageEntry { id: "id04".to_string(), ver: parser.parse("1.2-SNAPSHOT").unwrap() }); entries0.insert(ImageEntry { id: "id05".to_string(), ver: parser.parse("1.1-SNAPSHOT").unwrap() }); entries0.insert(ImageEntry { id: "id06".to_string(), ver: parser.parse("1.2.0-BR123").unwrap() }); entries0.insert(ImageEntry { id: "id07".to_string(), ver: parser.parse("1.2.1-BR123").unwrap() }); entries0.insert(ImageEntry { id: "id08".to_string(), ver: parser.parse("1.2.10-BR123").unwrap() }); entries0.insert(ImageEntry { id: "id09".to_string(), ver: parser.parse("1.2.2-BR123").unwrap() }); entries0.insert(ImageEntry { id: "id10".to_string(), ver: parser.parse("1.2.2-BR123-SNAPSHOT").unwrap() }); entries0.insert(ImageEntry { id: "id11".to_string(), ver: parser.parse("1.2.1-BR123-SNAPSHOT").unwrap() }); map.insert("repo0".to_string(), entries0); let mut entries1 = HashSet::new(); entries1.insert(ImageEntry { id: "id12".to_string(), ver: parser.parse("2.0").unwrap() }); entries1.insert(ImageEntry { id: "id13".to_string(), ver: parser.parse("2.1").unwrap() }); entries1.insert(ImageEntry { id: "id14".to_string(), ver: parser.parse("2.10").unwrap() }); entries1.insert(ImageEntry { id: "id15".to_string(), ver: parser.parse("2.2").unwrap() }); map.insert("repo1".to_string(), entries1); let images = Images { entries: map }; let mut deleted = HashSet::new(); images.delete(3, 1, |repo, ver| { deleted.insert(format!("{}:{}", repo, ver.to_string())); }); assert_eq!(deleted.len(), 5); assert_eq!(deleted.contains("repo0:1.0"), true); assert_eq!(deleted.contains("repo0:1.1-SNAPSHOT"), true); assert_eq!(deleted.contains("repo0:1.2.0-BR123"), true); assert_eq!(deleted.contains("repo0:1.2.1-BR123-SNAPSHOT"), true); assert_eq!(deleted.contains("repo1:2.0"), true); }
{ self.ver == other.ver }
inline.rs
//! Inlining pass for MIR functions use rustc_attr as attr; use rustc_hir::def_id::DefId; use rustc_index::bit_set::BitSet; use rustc_index::vec::{Idx, IndexVec}; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; use rustc_middle::mir::visit::*; use rustc_middle::mir::*; use rustc_middle::ty::subst::{Subst, SubstsRef}; use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt}; use rustc_target::spec::abi::Abi; use super::simplify::{remove_dead_blocks, CfgSimplifier}; use crate::transform::MirPass; use std::collections::VecDeque; use std::iter; const DEFAULT_THRESHOLD: usize = 50; const HINT_THRESHOLD: usize = 100; const INSTR_COST: usize = 5; const CALL_PENALTY: usize = 25; const LANDINGPAD_PENALTY: usize = 50; const RESUME_PENALTY: usize = 45; const UNKNOWN_SIZE_COST: usize = 10; pub struct Inline; #[derive(Copy, Clone, Debug)] struct CallSite<'tcx> { callee: DefId, substs: SubstsRef<'tcx>, bb: BasicBlock, location: SourceInfo, } impl<'tcx> MirPass<'tcx> for Inline { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 { if tcx.sess.opts.debugging_opts.instrument_coverage { // The current implementation of source code coverage injects code region counters // into the MIR, and assumes a 1-to-1 correspondence between MIR and source-code- // based function. debug!("function inlining is disabled when compiling with `instrument_coverage`"); } else { Inliner { tcx, param_env: tcx.param_env_reveal_all_normalized(body.source.def_id()), codegen_fn_attrs: tcx.codegen_fn_attrs(body.source.def_id()), } .run_pass(body); } } } } struct Inliner<'tcx> { tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, codegen_fn_attrs: &'tcx CodegenFnAttrs, } impl Inliner<'tcx> { fn run_pass(&self, caller_body: &mut Body<'tcx>) { // Keep a queue of callsites to try inlining on. We take // advantage of the fact that queries detect cycles here to // allow us to try and fetch the fully optimized MIR of a // call; if it succeeds, we can inline it and we know that // they do not call us. Otherwise, we just don't try to // inline. // // We use a queue so that we inline "broadly" before we inline // in depth. It is unclear if this is the best heuristic, // really, but that's true of all the heuristics in this // file. =) let mut callsites = VecDeque::new(); let def_id = caller_body.source.def_id(); // Only do inlining into fn bodies. let self_hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); if self.tcx.hir().body_owner_kind(self_hir_id).is_fn_or_closure() && caller_body.source.promoted.is_none() { for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated() { if let Some(callsite) = self.get_valid_function_call(bb, bb_data, caller_body) { callsites.push_back(callsite); } } } else { return; } let mut local_change; let mut changed = false; loop { local_change = false; while let Some(callsite) = callsites.pop_front() { debug!("checking whether to inline callsite {:?}", callsite); if !self.tcx.is_mir_available(callsite.callee) { debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite); continue; } let callee_body = if let Some(callee_def_id) = callsite.callee.as_local() { let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id); // Avoid a cycle here by only using `optimized_mir` only if we have // a lower `HirId` than the callee. This ensures that the callee will // not inline us. This trick only works without incremental compilation. 
// So don't do it if that is enabled. Also avoid inlining into generators, // since their `optimized_mir` is used for layout computation, which can // create a cycle, even when no attempt is made to inline the function // in the other direction. if !self.tcx.dep_graph.is_fully_enabled() && self_hir_id < callee_hir_id && caller_body.generator_kind.is_none() { self.tcx.optimized_mir(callsite.callee) } else { continue; } } else { // This cannot result in a cycle since the callee MIR is from another crate // and is already optimized. self.tcx.optimized_mir(callsite.callee) }; let callee_body = if self.consider_optimizing(callsite, callee_body) { self.tcx.subst_and_normalize_erasing_regions( &callsite.substs, self.param_env, callee_body, ) } else { continue; }; // Copy only unevaluated constants from the callee_body into the caller_body. // Although we are only pushing `ConstKind::Unevaluated` consts to // `required_consts`, here we may not only have `ConstKind::Unevaluated` // because we are calling `subst_and_normalize_erasing_regions`. caller_body.required_consts.extend( callee_body.required_consts.iter().copied().filter(|&constant| { matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _)) }), ); let start = caller_body.basic_blocks().len(); debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body); if !self.inline_call(callsite, caller_body, callee_body) { debug!("attempting to inline callsite {:?} - failure", callsite); continue; } debug!("attempting to inline callsite {:?} - success", callsite); // Add callsites from inlined function for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) { if let Some(new_callsite) = self.get_valid_function_call(bb, bb_data, caller_body) { // Don't inline the same function multiple times. if callsite.callee != new_callsite.callee { callsites.push_back(new_callsite); } } } local_change = true; changed = true; } if !local_change { break; } } // Simplify if we inlined anything. if changed { debug!("running simplify cfg on {:?}", caller_body.source); CfgSimplifier::new(caller_body).simplify(); remove_dead_blocks(caller_body); } } fn get_valid_function_call( &self, bb: BasicBlock, bb_data: &BasicBlockData<'tcx>, caller_body: &Body<'tcx>, ) -> Option<CallSite<'tcx>> { // Don't inline calls that are in cleanup blocks. if bb_data.is_cleanup { return None; } // Only consider direct calls to functions let terminator = bb_data.terminator(); if let TerminatorKind::Call { func: ref op, .. } = terminator.kind { if let ty::FnDef(callee_def_id, substs) = *op.ty(caller_body, self.tcx).kind() { // To resolve an instance its substs have to be fully normalized, so // we do this here. let normalized_substs = self.tcx.normalize_erasing_regions(self.param_env, substs); let instance = Instance::resolve(self.tcx, self.param_env, callee_def_id, normalized_substs) .ok() .flatten()?; if let InstanceDef::Virtual(..) = instance.def { return None; } return Some(CallSite { callee: instance.def_id(), substs: instance.substs, bb, location: terminator.source_info, }); } } None } fn
(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool { debug!("consider_optimizing({:?})", callsite); self.should_inline(callsite, callee_body) && self.tcx.consider_optimizing(|| { format!("Inline {:?} into {:?}", callee_body.span, callsite) }) } fn should_inline(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool { debug!("should_inline({:?})", callsite); let tcx = self.tcx; // Cannot inline generators which haven't been transformed yet if callee_body.yield_ty.is_some() { debug!(" yield ty present - not inlining"); return false; } let codegen_fn_attrs = tcx.codegen_fn_attrs(callsite.callee); if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::TRACK_CALLER) { debug!("`#[track_caller]` present - not inlining"); return false; } let self_features = &self.codegen_fn_attrs.target_features; let callee_features = &codegen_fn_attrs.target_features; if callee_features.iter().any(|feature| !self_features.contains(feature)) { debug!("`callee has extra target features - not inlining"); return false; } let self_no_sanitize = self.codegen_fn_attrs.no_sanitize & self.tcx.sess.opts.debugging_opts.sanitizer; let callee_no_sanitize = codegen_fn_attrs.no_sanitize & self.tcx.sess.opts.debugging_opts.sanitizer; if self_no_sanitize != callee_no_sanitize { debug!("`callee has incompatible no_sanitize attribute - not inlining"); return false; } let hinted = match codegen_fn_attrs.inline { // Just treat inline(always) as a hint for now, // there are cases that prevent inlining that we // need to check for first. attr::InlineAttr::Always => true, attr::InlineAttr::Never => { debug!("`#[inline(never)]` present - not inlining"); return false; } attr::InlineAttr::Hint => true, attr::InlineAttr::None => false, }; // Only inline local functions if they would be eligible for cross-crate // inlining. This is to ensure that the final crate doesn't have MIR that // reference unexported symbols if callsite.callee.is_local() { if callsite.substs.non_erasable_generics().count() == 0 && !hinted { debug!(" callee is an exported function - not inlining"); return false; } } let mut threshold = if hinted { HINT_THRESHOLD } else { DEFAULT_THRESHOLD }; // Significantly lower the threshold for inlining cold functions if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) { threshold /= 5; } // Give a bonus functions with a small number of blocks, // We normally have two or three blocks for even // very small functions. if callee_body.basic_blocks().len() <= 3 { threshold += threshold / 4; } debug!(" final inline threshold = {}", threshold); // FIXME: Give a bonus to functions with only a single caller let mut first_block = true; let mut cost = 0; // Traverse the MIR manually so we can account for the effects of // inlining on the CFG. let mut work_list = vec![START_BLOCK]; let mut visited = BitSet::new_empty(callee_body.basic_blocks().len()); while let Some(bb) = work_list.pop() { if !visited.insert(bb.index()) { continue; } let blk = &callee_body.basic_blocks()[bb]; for stmt in &blk.statements { // Don't count StorageLive/StorageDead in the inlining cost. match stmt.kind { StatementKind::StorageLive(_) | StatementKind::StorageDead(_) | StatementKind::Nop => {} _ => cost += INSTR_COST, } } let term = blk.terminator(); let mut is_drop = false; match term.kind { TerminatorKind::Drop { ref place, target, unwind } | TerminatorKind::DropAndReplace { ref place, target, unwind, .. 
} => { is_drop = true; work_list.push(target); // If the place doesn't actually need dropping, treat it like // a regular goto. let ty = place.ty(callee_body, tcx).subst(tcx, callsite.substs).ty; if ty.needs_drop(tcx, self.param_env) { cost += CALL_PENALTY; if let Some(unwind) = unwind { cost += LANDINGPAD_PENALTY; work_list.push(unwind); } } else { cost += INSTR_COST; } } TerminatorKind::Unreachable | TerminatorKind::Call { destination: None, .. } if first_block => { // If the function always diverges, don't inline // unless the cost is zero threshold = 0; } TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => { if let ty::FnDef(def_id, _) = *f.literal.ty.kind() { // Don't give intrinsics the extra penalty for calls let f = tcx.fn_sig(def_id); if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic { cost += INSTR_COST; } else { cost += CALL_PENALTY; } } else { cost += CALL_PENALTY; } if cleanup.is_some() { cost += LANDINGPAD_PENALTY; } } TerminatorKind::Assert { cleanup, .. } => { cost += CALL_PENALTY; if cleanup.is_some() { cost += LANDINGPAD_PENALTY; } } TerminatorKind::Resume => cost += RESUME_PENALTY, _ => cost += INSTR_COST, } if !is_drop { for &succ in term.successors() { work_list.push(succ); } } first_block = false; } // Count up the cost of local variables and temps, if we know the size // use that, otherwise we use a moderately-large dummy cost. let ptr_size = tcx.data_layout.pointer_size.bytes(); for v in callee_body.vars_and_temps_iter() { let v = &callee_body.local_decls[v]; let ty = v.ty.subst(tcx, callsite.substs); // Cost of the var is the size in machine-words, if we know // it. if let Some(size) = type_size_of(tcx, self.param_env, ty) { cost += (size / ptr_size) as usize; } else { cost += UNKNOWN_SIZE_COST; } } if let attr::InlineAttr::Always = codegen_fn_attrs.inline { debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost); true } else { if cost <= threshold { debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold); true } else { debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold); false } } } fn inline_call( &self, callsite: CallSite<'tcx>, caller_body: &mut Body<'tcx>, mut callee_body: Body<'tcx>, ) -> bool { let terminator = caller_body[callsite.bb].terminator.take().unwrap(); match terminator.kind { // FIXME: Handle inlining of diverging calls TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => { debug!("inlined {:?} into {:?}", callsite.callee, caller_body.source); let mut local_map = IndexVec::with_capacity(callee_body.local_decls.len()); let mut scope_map = IndexVec::with_capacity(callee_body.source_scopes.len()); for mut scope in callee_body.source_scopes.iter().cloned() { if scope.parent_scope.is_none() { scope.parent_scope = Some(callsite.location.scope); // FIXME(eddyb) is this really needed? // (also note that it's always overwritten below) scope.span = callee_body.span; } // FIXME(eddyb) this doesn't seem right at all. // The inlined source scopes should probably be annotated as // such, but also contain all of the original information. 
scope.span = callsite.location.span; let idx = caller_body.source_scopes.push(scope); scope_map.push(idx); } for loc in callee_body.vars_and_temps_iter() { let mut local = callee_body.local_decls[loc].clone(); local.source_info.scope = scope_map[local.source_info.scope]; local.source_info.span = callsite.location.span; let idx = caller_body.local_decls.push(local); local_map.push(idx); } // If the call is something like `a[*i] = f(i)`, where // `i : &mut usize`, then just duplicating the `a[*i]` // Place could result in two different locations if `f` // writes to `i`. To prevent this we need to create a temporary // borrow of the place and pass the destination as `*temp` instead. fn dest_needs_borrow(place: Place<'_>) -> bool { for elem in place.projection.iter() { match elem { ProjectionElem::Deref | ProjectionElem::Index(_) => return true, _ => {} } } false } let dest = if dest_needs_borrow(destination.0) { debug!("creating temp for return destination"); let dest = Rvalue::Ref( self.tcx.lifetimes.re_erased, BorrowKind::Mut { allow_two_phase_borrow: false }, destination.0, ); let ty = dest.ty(caller_body, self.tcx); let temp = LocalDecl::new(ty, callsite.location.span); let tmp = caller_body.local_decls.push(temp); let tmp = Place::from(tmp); let stmt = Statement { source_info: callsite.location, kind: StatementKind::Assign(box (tmp, dest)), }; caller_body[callsite.bb].statements.push(stmt); self.tcx.mk_place_deref(tmp) } else { destination.0 }; let return_block = destination.1; // Copy the arguments if needed. let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, return_block); let bb_len = caller_body.basic_blocks().len(); let mut integrator = Integrator { block_idx: bb_len, args: &args, local_map, scope_map, destination: dest, return_block, cleanup_block: cleanup, in_cleanup_block: false, tcx: self.tcx, }; for mut var_debug_info in callee_body.var_debug_info.drain(..) { integrator.visit_var_debug_info(&mut var_debug_info); caller_body.var_debug_info.push(var_debug_info); } for (bb, mut block) in callee_body.basic_blocks_mut().drain_enumerated(..) { integrator.visit_basic_block_data(bb, &mut block); caller_body.basic_blocks_mut().push(block); } let terminator = Terminator { source_info: callsite.location, kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) }, }; caller_body[callsite.bb].terminator = Some(terminator); true } kind => { caller_body[callsite.bb].terminator = Some(Terminator { source_info: terminator.source_info, kind }); false } } } fn make_call_args( &self, args: Vec<Operand<'tcx>>, callsite: &CallSite<'tcx>, caller_body: &mut Body<'tcx>, return_block: BasicBlock, ) -> Vec<Local> { let tcx = self.tcx; // There is a bit of a mismatch between the *caller* of a closure and the *callee*. // The caller provides the arguments wrapped up in a tuple: // // tuple_tmp = (a, b, c) // Fn::call(closure_ref, tuple_tmp) // // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`) // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has // the job of unpacking this tuple. But here, we are codegen. =) So we want to create // a vector like // // [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2] // // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient // if we "spill" that into *another* temporary, so that we can map the argument // variable in the callee MIR directly to an argument variable on our side. 
// So we introduce temporaries like: // // tmp0 = tuple_tmp.0 // tmp1 = tuple_tmp.1 // tmp2 = tuple_tmp.2 // // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`. if tcx.is_closure(callsite.callee) { let mut args = args.into_iter(); let self_ = self.create_temp_if_necessary( args.next().unwrap(), callsite, caller_body, return_block, ); let tuple = self.create_temp_if_necessary( args.next().unwrap(), callsite, caller_body, return_block, ); assert!(args.next().is_none()); let tuple = Place::from(tuple); let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_body, tcx).ty.kind() { s } else { bug!("Closure arguments are not passed as a tuple"); }; // The `closure_ref` in our example above. let closure_ref_arg = iter::once(self_); // The `tmp0`, `tmp1`, and `tmp2` in our example abonve. let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| { // This is e.g., `tuple_tmp.0` in our example above. let tuple_field = Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty.expect_ty())); // Spill to a local to make e.g., `tmp0`. self.create_temp_if_necessary(tuple_field, callsite, caller_body, return_block) }); closure_ref_arg.chain(tuple_tmp_args).collect() } else { args.into_iter() .map(|a| self.create_temp_if_necessary(a, callsite, caller_body, return_block)) .collect() } } /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh /// temporary `T` and an instruction `T = arg`, and returns `T`. fn create_temp_if_necessary( &self, arg: Operand<'tcx>, callsite: &CallSite<'tcx>, caller_body: &mut Body<'tcx>, return_block: BasicBlock, ) -> Local { // FIXME: Analysis of the usage of the arguments to avoid // unnecessary temporaries. if let Operand::Move(place) = &arg { if let Some(local) = place.as_local() { if caller_body.local_kind(local) == LocalKind::Temp { // Reuse the operand if it's a temporary already return local; } } } debug!("creating temp for argument {:?}", arg); // Otherwise, create a temporary for the arg let arg = Rvalue::Use(arg); let ty = arg.ty(caller_body, self.tcx); let arg_tmp = LocalDecl::new(ty, callsite.location.span); let arg_tmp = caller_body.local_decls.push(arg_tmp); caller_body[callsite.bb].statements.push(Statement { source_info: callsite.location, kind: StatementKind::StorageLive(arg_tmp), }); caller_body[callsite.bb].statements.push(Statement { source_info: callsite.location, kind: StatementKind::Assign(box (Place::from(arg_tmp), arg)), }); caller_body[return_block].statements.insert( 0, Statement { source_info: callsite.location, kind: StatementKind::StorageDead(arg_tmp) }, ); arg_tmp } } fn type_size_of<'tcx>( tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>, ) -> Option<u64> { tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes()) } /** * Integrator. * * Integrates blocks from the callee function into the calling function. * Updates block indices, references to locals and other control flow * stuff. 
*/ struct Integrator<'a, 'tcx> { block_idx: usize, args: &'a [Local], local_map: IndexVec<Local, Local>, scope_map: IndexVec<SourceScope, SourceScope>, destination: Place<'tcx>, return_block: BasicBlock, cleanup_block: Option<BasicBlock>, in_cleanup_block: bool, tcx: TyCtxt<'tcx>, } impl<'a, 'tcx> Integrator<'a, 'tcx> { fn update_target(&self, tgt: BasicBlock) -> BasicBlock { let new = BasicBlock::new(tgt.index() + self.block_idx); debug!("updating target `{:?}`, new: `{:?}`", tgt, new); new } fn make_integrate_local(&self, local: Local) -> Local { if local == RETURN_PLACE { return self.destination.local; } let idx = local.index() - 1; if idx < self.args.len() { return self.args[idx]; } self.local_map[Local::new(idx - self.args.len())] } } impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) { *local = self.make_integrate_local(*local); } fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) { // If this is the `RETURN_PLACE`, we need to rebase any projections onto it. let dest_proj_len = self.destination.projection.len(); if place.local == RETURN_PLACE && dest_proj_len > 0 { let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len()); projs.extend(self.destination.projection); projs.extend(place.projection); place.projection = self.tcx.intern_place_elems(&*projs); } // Handles integrating any locals that occur in the base // or projections self.super_place(place, context, location) } fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) { self.in_cleanup_block = data.is_cleanup; self.super_basic_block_data(block, data); self.in_cleanup_block = false; } fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) { self.super_retag(kind, place, loc); // We have to patch all inlined retags to be aware that they are no longer // happening on function entry. if *kind == RetagKind::FnEntry { *kind = RetagKind::Default; } } fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) { // Don't try to modify the implicit `_0` access on return (`return` terminators are // replaced down below anyways). if !matches!(terminator.kind, TerminatorKind::Return) { self.super_terminator(terminator, loc); } match terminator.kind { TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(), TerminatorKind::Goto { ref mut target } => { *target = self.update_target(*target); } TerminatorKind::SwitchInt { ref mut targets, .. } => { for tgt in targets.all_targets_mut() { *tgt = self.update_target(*tgt); } } TerminatorKind::Drop { ref mut target, ref mut unwind, .. } | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => { *target = self.update_target(*target); if let Some(tgt) = *unwind { *unwind = Some(self.update_target(tgt)); } else if !self.in_cleanup_block { // Unless this drop is in a cleanup block, add an unwind edge to // the original call's cleanup block *unwind = self.cleanup_block; } } TerminatorKind::Call { ref mut destination, ref mut cleanup, .. 
} => { if let Some((_, ref mut tgt)) = *destination { *tgt = self.update_target(*tgt); } if let Some(tgt) = *cleanup { *cleanup = Some(self.update_target(tgt)); } else if !self.in_cleanup_block { // Unless this call is in a cleanup block, add an unwind edge to // the original call's cleanup block *cleanup = self.cleanup_block; } } TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => { *target = self.update_target(*target); if let Some(tgt) = *cleanup { *cleanup = Some(self.update_target(tgt)); } else if !self.in_cleanup_block { // Unless this assert is in a cleanup block, add an unwind edge to // the original call's cleanup block *cleanup = self.cleanup_block; } } TerminatorKind::Return => { terminator.kind = TerminatorKind::Goto { target: self.return_block }; } TerminatorKind::Resume => { if let Some(tgt) = self.cleanup_block { terminator.kind = TerminatorKind::Goto { target: tgt } } } TerminatorKind::Abort => {} TerminatorKind::Unreachable => {} TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => { *real_target = self.update_target(*real_target); *imaginary_target = self.update_target(*imaginary_target); } TerminatorKind::FalseUnwind { real_target: _, unwind: _ } => // see the ordering of passes in the optimized_mir query. { bug!("False unwinds should have been removed before inlining") } TerminatorKind::InlineAsm { ref mut destination, .. } => { if let Some(ref mut tgt) = *destination { *tgt = self.update_target(*tgt); } } } } fn visit_source_scope(&mut self, scope: &mut SourceScope) { *scope = self.scope_map[*scope]; } }
consider_optimizing
styles.ts
import styled from 'styled-components' export const Container = styled.div` flex: 1; height: 0; padding-bottom: 32%; overflow: hidden; position: relative; touch-action: manipulation; -webkit-box-shadow: 0px 1px 2px rgba(0, 0, 0, 0.2); -moz-box-shadow: 0px 1px 2px rgba(0, 0, 0, 0.2); box-shadow: 0px 1px 2px rgba(0, 0, 0, 0.2); cursor: pointer; border-radius: 10px; & + & { margin-left: 8px; } &::after { width: 100%; height: 100%; content: ''; top: 0; left: 0; position: absolute; transition: background-color 0.12s ease-in-out; background-color: transparent; } &:hover, &:focus-visible { &::after { background-color: rgba(0, 0, 0, 0.1); } & > div > img { transform: scale(1.025); } } ` export const Content = styled.div` width: 100%; height: 100%; top: 0; left: 0; position: absolute; display: flex; align-items: center; justify-content: center; &::after { width: 100%; height: 100%; content: ''; top: 0; left: 0; position: absolute; transition: background-color 0.12s ease-in-out; background-color: rgba(0, 0, 0, 0.08); } ` export const PreviewImage = styled.img` width: 100%; height: 100%; max-width: 100%; object-fit: cover; transition: transform 0.12s ease-in-out; ` export const UserImage = styled.img` top: 12px; left: 12px; position: absolute; width: 40px; height: 40px; object-fit: cover; border: 4px solid ${({ theme }) => theme.accent}; border-radius: 50%; ` export const TextContainer = styled.div<{ myStory?: boolean }>` width: 100%; padding: ${({ myStory }) => (myStory ? '28px 16px 12px' : '12px')}; ${({ myStory }) => myStory && `display: flex; align-items: center; justify-content: center; `}; left: 0; bottom: 0; position: absolute; background-color: ${({ theme, myStory }) => myStory ? theme.cardBackground : 'transparent'}; ` export const Text = styled.span<{ myStory?: boolean }>` width: 100%; max-width: 100%; -webkit-box-orient: vertical; -webkit-line-clamp: 2; display: -webkit-box; font: 600 1.3rem/1.2308 Helvetica, 'Inter', Arial, sans-serif; color: ${({ myStory, theme }) => (myStory ? theme.primaryText : theme.white)}; text-align: ${({ myStory }) => (myStory ? 'center' : 'left')}; word-wrap: break-word; word-break: break-word; ` export const PlusIconContainer = styled.div` width: 40px; height: 40px; top: -20px; position: absolute; display: flex; align-items: center; justify-content: center; border-radius: 50%; background-color: ${({ theme }) => theme.cardBackground}; ` export const PlusIconContent = styled.div` width: 32px; height: 32px; display: flex; align-items: center;
& > svg { width: 20px; height: 20px; fill: ${({ theme }) => theme.white}; } `
justify-content: center; border-radius: 50%; background-color: ${({ theme }) => theme.accent};
test_tasks.py
""" This code contains test tasks for luigi Author: Winston Olson-Duvall, [email protected] """ import os import luigi from emit_main.workflow.workflow_manager import WorkflowManager from emit_main.workflow.slurm import SlurmJobTask class ExampleTask(SlurmJobTask): config_path = luigi.Parameter() acquisition_id = luigi.Parameter() level = luigi.Parameter() partition = luigi.Parameter() task_namespace = "emit" def requires(self): return None def output(self): wm = WorkflowManager(config_path=self.config_path, acquisition_id=self.acquisition_id) acq = wm.acquisition return luigi.LocalTarget(acq.rdn_hdr_path) def work(self):
wm = WorkflowManager(config_path=self.config_path, acquisition_id=self.acquisition_id) acq = wm.acquisition os.system(" ".join(["touch", acq.rdn_hdr_path]))
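For context, a task like ExampleTask would typically be kicked off through luigi's Python API. The sketch below is only illustrative: the parameter values are placeholders rather than real paths or IDs, and it assumes a local scheduler instead of the production setup.

import luigi

from test_tasks import ExampleTask

if __name__ == "__main__":
    luigi.build(
        [
            ExampleTask(
                config_path="config/test_config.json",  # placeholder path
                acquisition_id="emit20200101t000000",   # placeholder acquisition
                level="l1a",                            # placeholder level
                partition="debug",                      # placeholder partition
            )
        ],
        local_scheduler=True,
        workers=1,
    )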
resolveAssets.ts
import { currentRenderingInstance } from '../componentRenderUtils' import { currentInstance, Component, FunctionalComponent, ComponentOptions } from '../component' import { Directive } from '../directives' import { camelize, capitalize, isString, isObject } from '@vue/shared' import { warn } from '../warning' const COMPONENTS = 'components' const DIRECTIVES = 'directives' export function resolveComponent(name: string): Component | string | undefined { return resolveAsset(COMPONENTS, name) || name } export const NULL_DYNAMIC_COMPONENT = Symbol() export function resolveDynamicComponent( component: unknown ): Component | string | typeof NULL_DYNAMIC_COMPONENT { if (isString(component)) { return resolveAsset(COMPONENTS, component, false) || component } else { // invalid types will fallthrough to createVNode and raise warning return (component as any) || NULL_DYNAMIC_COMPONENT } } export function resolveDirective(name: string): Directive | undefined { return resolveAsset(DIRECTIVES, name) } // overload 1: components function resolveAsset( type: typeof COMPONENTS, name: string, warnMissing?: boolean ): Component | undefined // overload 2: directives function resolveAsset( type: typeof DIRECTIVES, name: string ): Directive | undefined function resolveAsset( type: typeof COMPONENTS | typeof DIRECTIVES, name: string, warnMissing = true ) { const instance = currentRenderingInstance || currentInstance if (instance) { let camelized, capitalized const registry = instance[type] let res = registry[name] || registry[(camelized = camelize(name))] || registry[(capitalized = capitalize(camelized))] if (!res && type === COMPONENTS) { const self = instance.type const selfName = (self as FunctionalComponent).displayName || self.name if ( selfName && (selfName === name || selfName === camelized || selfName === capitalized) ) { res = self } } if (__DEV__) { if (res) { // in dev, infer anonymous component's name based on registered name if ( type === COMPONENTS && isObject(res) && !(res as ComponentOptions).name ) { ;(res as ComponentOptions).name = name } } else if (warnMissing) { warn(`Failed to resolve ${type.slice(0, -1)}: ${name}`) } } return res } else if (__DEV__) { warn( `resolve${capitalize(type.slice(0, -1))} ` + `can only be used in render() or setup().`
) } }
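As the warning at the end notes, these helpers only work while a component instance is currently being set up or rendered, since they rely on that instance to reach the component and directive registries. A hypothetical usage sketch (the component name and prop are made up for illustration):

import { defineComponent, h, resolveComponent } from 'vue'

// Renders a registered "MyButton" component by name; if the lookup fails,
// resolveComponent falls back to the plain string, which h treats as a tag.
export default defineComponent({
  setup() {
    // Must be called inside setup() or render().
    const MyButton = resolveComponent('MyButton')
    return () => h(MyButton, { type: 'primary' })
  },
})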
main.go
package main

import (
	"os"

	"github.com/soracom/soratun/cmd"
)

func main() {
	os.Exit(run())
}

// run executes the root command and maps any error to a non-zero exit code.
func run() int {
	if err := cmd.RootCmd.Execute(); err != nil {
		return -1
	}
	return 0
}
combine.rs
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::COMBINE { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = "Possible values of the field `COMBINE0`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum COMBINE0R { #[doc = "Channels (n) and (n+1) are independent."] _0, #[doc = "Channels (n) and (n+1) are combined."] _1, } impl COMBINE0R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { COMBINE0R::_0 => false, COMBINE0R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> COMBINE0R { match value { false => COMBINE0R::_0, true => COMBINE0R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == COMBINE0R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == COMBINE0R::_1 } } #[doc = "Possible values of the field `COMP0`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum COMP0R { #[doc = "The channel (n+1) output is the same as the channel (n) output."] _0, #[doc = "The channel (n+1) output is the complement of the channel (n) output."] _1, } impl COMP0R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { COMP0R::_0 => false, COMP0R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> COMP0R { match value { false => COMP0R::_0, true => COMP0R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == COMP0R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == COMP0R::_1 } } #[doc = "Possible values of the field `DECAPEN0`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DECAPEN0R { #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] _0, #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] _1, } impl DECAPEN0R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DECAPEN0R::_0 => false, DECAPEN0R::_1 => true, } } #[allow(missing_docs)] 
#[doc(hidden)] #[inline] pub fn _from(value: bool) -> DECAPEN0R { match value { false => DECAPEN0R::_0, true => DECAPEN0R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DECAPEN0R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DECAPEN0R::_1 } } #[doc = "Possible values of the field `DECAP0`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DECAP0R { #[doc = "The dual edge captures are inactive."] _0, #[doc = "The dual edge captures are active."] _1, } impl DECAP0R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DECAP0R::_0 => false, DECAP0R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> DECAP0R { match value { false => DECAP0R::_0, true => DECAP0R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DECAP0R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DECAP0R::_1 } } #[doc = "Possible values of the field `DTEN0`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DTEN0R { #[doc = "The deadtime insertion in this pair of channels is disabled."] _0, #[doc = "The deadtime insertion in this pair of channels is enabled."] _1, } impl DTEN0R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DTEN0R::_0 => false, DTEN0R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> DTEN0R { match value { false => DTEN0R::_0, true => DTEN0R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DTEN0R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DTEN0R::_1 } } #[doc = "Possible values of the field `SYNCEN0`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SYNCEN0R { #[doc = "The PWM synchronization in this pair of channels is disabled."] _0, #[doc = "The PWM synchronization in this pair of channels is enabled."] _1, } impl SYNCEN0R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SYNCEN0R::_0 => false, SYNCEN0R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SYNCEN0R { match value { false => SYNCEN0R::_0, true => SYNCEN0R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SYNCEN0R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SYNCEN0R::_1 } } #[doc = "Possible values of the field `FAULTEN0`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FAULTEN0R { #[doc = "The fault control in this pair of channels is disabled."] 
_0, #[doc = "The fault control in this pair of channels is enabled."] _1, } impl FAULTEN0R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { FAULTEN0R::_0 => false, FAULTEN0R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> FAULTEN0R { match value { false => FAULTEN0R::_0, true => FAULTEN0R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == FAULTEN0R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == FAULTEN0R::_1 } } #[doc = "Possible values of the field `COMBINE1`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum COMBINE1R { #[doc = "Channels (n) and (n+1) are independent."] _0, #[doc = "Channels (n) and (n+1) are combined."] _1, } impl COMBINE1R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { COMBINE1R::_0 => false, COMBINE1R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> COMBINE1R { match value { false => COMBINE1R::_0, true => COMBINE1R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == COMBINE1R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == COMBINE1R::_1 } } #[doc = "Possible values of the field `COMP1`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum COMP1R { #[doc = "The channel (n+1) output is the same as the channel (n) output."] _0, #[doc = "The channel (n+1) output is the complement of the channel (n) output."] _1, } impl COMP1R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { COMP1R::_0 => false, COMP1R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> COMP1R { match value { false => COMP1R::_0, true => COMP1R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == COMP1R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == COMP1R::_1 } } #[doc = "Possible values of the field `DECAPEN1`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DECAPEN1R { #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] _0, #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] _1, } impl DECAPEN1R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DECAPEN1R::_0 => false, DECAPEN1R::_1 => true, } } #[allow(missing_docs)] 
#[doc(hidden)] #[inline] pub fn _from(value: bool) -> DECAPEN1R { match value { false => DECAPEN1R::_0, true => DECAPEN1R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DECAPEN1R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DECAPEN1R::_1 } } #[doc = "Possible values of the field `DECAP1`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DECAP1R { #[doc = "The dual edge captures are inactive."] _0, #[doc = "The dual edge captures are active."] _1, } impl DECAP1R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DECAP1R::_0 => false, DECAP1R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> DECAP1R { match value { false => DECAP1R::_0, true => DECAP1R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DECAP1R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DECAP1R::_1 } } #[doc = "Possible values of the field `DTEN1`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DTEN1R { #[doc = "The deadtime insertion in this pair of channels is disabled."] _0, #[doc = "The deadtime insertion in this pair of channels is enabled."] _1, } impl DTEN1R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() }
#[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DTEN1R::_0 => false, DTEN1R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> DTEN1R { match value { false => DTEN1R::_0, true => DTEN1R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DTEN1R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DTEN1R::_1 } } #[doc = "Possible values of the field `SYNCEN1`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SYNCEN1R { #[doc = "The PWM synchronization in this pair of channels is disabled."] _0, #[doc = "The PWM synchronization in this pair of channels is enabled."] _1, } impl SYNCEN1R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SYNCEN1R::_0 => false, SYNCEN1R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SYNCEN1R { match value { false => SYNCEN1R::_0, true => SYNCEN1R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SYNCEN1R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SYNCEN1R::_1 } } #[doc = "Possible values of the field `FAULTEN1`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FAULTEN1R { #[doc = "The fault control in this pair of channels is disabled."] _0, #[doc = "The fault control in this pair of channels is enabled."] _1, } impl FAULTEN1R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { FAULTEN1R::_0 => false, FAULTEN1R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> FAULTEN1R { match value { false => FAULTEN1R::_0, true => FAULTEN1R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == FAULTEN1R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == FAULTEN1R::_1 } } #[doc = "Possible values of the field `COMBINE2`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum COMBINE2R { #[doc = "Channels (n) and (n+1) are independent."] _0, #[doc = "Channels (n) and (n+1) are combined."] _1, } impl COMBINE2R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { COMBINE2R::_0 => false, COMBINE2R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> COMBINE2R { match value { false => COMBINE2R::_0, true => COMBINE2R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == 
COMBINE2R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == COMBINE2R::_1 } } #[doc = "Possible values of the field `COMP2`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum COMP2R { #[doc = "The channel (n+1) output is the same as the channel (n) output."] _0, #[doc = "The channel (n+1) output is the complement of the channel (n) output."] _1, } impl COMP2R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { COMP2R::_0 => false, COMP2R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> COMP2R { match value { false => COMP2R::_0, true => COMP2R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == COMP2R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == COMP2R::_1 } } #[doc = "Possible values of the field `DECAPEN2`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DECAPEN2R { #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] _0, #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] _1, } impl DECAPEN2R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DECAPEN2R::_0 => false, DECAPEN2R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> DECAPEN2R { match value { false => DECAPEN2R::_0, true => DECAPEN2R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DECAPEN2R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DECAPEN2R::_1 } } #[doc = "Possible values of the field `DECAP2`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DECAP2R { #[doc = "The dual edge captures are inactive."] _0, #[doc = "The dual edge captures are active."] _1, } impl DECAP2R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DECAP2R::_0 => false, DECAP2R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> DECAP2R { match value { false => DECAP2R::_0, true => DECAP2R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DECAP2R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DECAP2R::_1 } } #[doc = "Possible values of the field `DTEN2`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DTEN2R { #[doc = "The deadtime insertion in this pair of channels is disabled."] _0, #[doc = "The deadtime insertion in this pair of channels is enabled."] _1, } impl DTEN2R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { 
!self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DTEN2R::_0 => false, DTEN2R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> DTEN2R { match value { false => DTEN2R::_0, true => DTEN2R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DTEN2R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DTEN2R::_1 } } #[doc = "Possible values of the field `SYNCEN2`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SYNCEN2R { #[doc = "The PWM synchronization in this pair of channels is disabled."] _0, #[doc = "The PWM synchronization in this pair of channels is enabled."] _1, } impl SYNCEN2R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SYNCEN2R::_0 => false, SYNCEN2R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SYNCEN2R { match value { false => SYNCEN2R::_0, true => SYNCEN2R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SYNCEN2R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SYNCEN2R::_1 } } #[doc = "Possible values of the field `FAULTEN2`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FAULTEN2R { #[doc = "The fault control in this pair of channels is disabled."] _0, #[doc = "The fault control in this pair of channels is enabled."] _1, } impl FAULTEN2R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { FAULTEN2R::_0 => false, FAULTEN2R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> FAULTEN2R { match value { false => FAULTEN2R::_0, true => FAULTEN2R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == FAULTEN2R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == FAULTEN2R::_1 } } #[doc = "Possible values of the field `COMBINE3`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum COMBINE3R { #[doc = "Channels (n) and (n+1) are independent."] _0, #[doc = "Channels (n) and (n+1) are combined."] _1, } impl COMBINE3R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { COMBINE3R::_0 => false, COMBINE3R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> COMBINE3R { match value { false => COMBINE3R::_0, true => COMBINE3R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self 
== COMBINE3R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == COMBINE3R::_1 } } #[doc = "Possible values of the field `COMP3`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum COMP3R { #[doc = "The channel (n+1) output is the same as the channel (n) output."] _0, #[doc = "The channel (n+1) output is the complement of the channel (n) output."] _1, } impl COMP3R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { COMP3R::_0 => false, COMP3R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> COMP3R { match value { false => COMP3R::_0, true => COMP3R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == COMP3R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == COMP3R::_1 } } #[doc = "Possible values of the field `DECAPEN3`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DECAPEN3R { #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] _0, #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] _1, } impl DECAPEN3R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DECAPEN3R::_0 => false, DECAPEN3R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> DECAPEN3R { match value { false => DECAPEN3R::_0, true => DECAPEN3R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DECAPEN3R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DECAPEN3R::_1 } } #[doc = "Possible values of the field `DECAP3`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DECAP3R { #[doc = "The dual edge captures are inactive."] _0, #[doc = "The dual edge captures are active."] _1, } impl DECAP3R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DECAP3R::_0 => false, DECAP3R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> DECAP3R { match value { false => DECAP3R::_0, true => DECAP3R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DECAP3R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DECAP3R::_1 } } #[doc = "Possible values of the field `DTEN3`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DTEN3R { #[doc = "The deadtime insertion in this pair of channels is disabled."] _0, #[doc = "The deadtime insertion in this pair of channels is enabled."] _1, } impl DTEN3R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { 
!self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DTEN3R::_0 => false, DTEN3R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> DTEN3R { match value { false => DTEN3R::_0, true => DTEN3R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DTEN3R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DTEN3R::_1 } } #[doc = "Possible values of the field `SYNCEN3`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SYNCEN3R { #[doc = "The PWM synchronization in this pair of channels is disabled."] _0, #[doc = "The PWM synchronization in this pair of channels is enabled."] _1, } impl SYNCEN3R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SYNCEN3R::_0 => false, SYNCEN3R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SYNCEN3R { match value { false => SYNCEN3R::_0, true => SYNCEN3R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SYNCEN3R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SYNCEN3R::_1 } } #[doc = "Possible values of the field `FAULTEN3`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FAULTEN3R { #[doc = "The fault control in this pair of channels is disabled."] _0, #[doc = "The fault control in this pair of channels is enabled."] _1, } impl FAULTEN3R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { FAULTEN3R::_0 => false, FAULTEN3R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> FAULTEN3R { match value { false => FAULTEN3R::_0, true => FAULTEN3R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == FAULTEN3R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == FAULTEN3R::_1 } } #[doc = "Values that can be written to the field `COMBINE0`"] pub enum COMBINE0W { #[doc = "Channels (n) and (n+1) are independent."] _0, #[doc = "Channels (n) and (n+1) are combined."] _1, } impl COMBINE0W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { COMBINE0W::_0 => false, COMBINE0W::_1 => true, } } } #[doc = r" Proxy"] pub struct _COMBINE0W<'a> { w: &'a mut W, } impl<'a> _COMBINE0W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: COMBINE0W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Channels (n) and (n+1) are independent."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(COMBINE0W::_0) } #[doc = "Channels (n) and (n+1) are combined."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(COMBINE0W::_1) } #[doc = r" Sets the field bit"] 
pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `COMP0`"] pub enum COMP0W { #[doc = "The channel (n+1) output is the same as the channel (n) output."] _0, #[doc = "The channel (n+1) output is the complement of the channel (n) output."] _1, } impl COMP0W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { COMP0W::_0 => false, COMP0W::_1 => true, } } } #[doc = r" Proxy"] pub struct _COMP0W<'a> { w: &'a mut W, } impl<'a> _COMP0W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: COMP0W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The channel (n+1) output is the same as the channel (n) output."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(COMP0W::_0) } #[doc = "The channel (n+1) output is the complement of the channel (n) output."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(COMP0W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 1; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DECAPEN0`"] pub enum DECAPEN0W { #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] _0, #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] _1, } impl DECAPEN0W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DECAPEN0W::_0 => false, DECAPEN0W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DECAPEN0W<'a> { w: &'a mut W, } impl<'a> _DECAPEN0W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DECAPEN0W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DECAPEN0W::_0) } #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DECAPEN0W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 2; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DECAP0`"] pub enum DECAP0W { #[doc = "The dual edge captures are inactive."] _0, #[doc = "The dual edge captures are active."] _1, } impl DECAP0W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DECAP0W::_0 => false, DECAP0W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DECAP0W<'a> { w: &'a mut W, } impl<'a> _DECAP0W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] 
pub fn variant(self, variant: DECAP0W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The dual edge captures are inactive."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DECAP0W::_0) } #[doc = "The dual edge captures are active."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DECAP0W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 3; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DTEN0`"] pub enum DTEN0W { #[doc = "The deadtime insertion in this pair of channels is disabled."] _0, #[doc = "The deadtime insertion in this pair of channels is enabled."] _1, } impl DTEN0W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DTEN0W::_0 => false, DTEN0W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DTEN0W<'a> { w: &'a mut W, } impl<'a> _DTEN0W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DTEN0W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The deadtime insertion in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DTEN0W::_0) } #[doc = "The deadtime insertion in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DTEN0W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 4; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SYNCEN0`"] pub enum SYNCEN0W { #[doc = "The PWM synchronization in this pair of channels is disabled."] _0, #[doc = "The PWM synchronization in this pair of channels is enabled."] _1, } impl SYNCEN0W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SYNCEN0W::_0 => false, SYNCEN0W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SYNCEN0W<'a> { w: &'a mut W, } impl<'a> _SYNCEN0W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SYNCEN0W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The PWM synchronization in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SYNCEN0W::_0) } #[doc = "The PWM synchronization in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SYNCEN0W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 5; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `FAULTEN0`"] pub enum FAULTEN0W { #[doc = "The fault control in this pair of channels is disabled."] _0, #[doc = "The fault 
control in this pair of channels is enabled."] _1, } impl FAULTEN0W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { FAULTEN0W::_0 => false, FAULTEN0W::_1 => true, } } } #[doc = r" Proxy"] pub struct _FAULTEN0W<'a> { w: &'a mut W, } impl<'a> _FAULTEN0W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: FAULTEN0W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The fault control in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(FAULTEN0W::_0) } #[doc = "The fault control in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(FAULTEN0W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 6; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `COMBINE1`"] pub enum COMBINE1W { #[doc = "Channels (n) and (n+1) are independent."] _0, #[doc = "Channels (n) and (n+1) are combined."] _1, } impl COMBINE1W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { COMBINE1W::_0 => false, COMBINE1W::_1 => true, } } } #[doc = r" Proxy"] pub struct _COMBINE1W<'a> { w: &'a mut W, } impl<'a> _COMBINE1W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: COMBINE1W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Channels (n) and (n+1) are independent."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(COMBINE1W::_0) } #[doc = "Channels (n) and (n+1) are combined."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(COMBINE1W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `COMP1`"] pub enum COMP1W { #[doc = "The channel (n+1) output is the same as the channel (n) output."] _0, #[doc = "The channel (n+1) output is the complement of the channel (n) output."] _1, } impl COMP1W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { COMP1W::_0 => false, COMP1W::_1 => true, } } } #[doc = r" Proxy"] pub struct _COMP1W<'a> { w: &'a mut W, } impl<'a> _COMP1W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: COMP1W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The channel (n+1) output is the same as the channel (n) output."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(COMP1W::_0) } #[doc = "The channel (n+1) output is the complement of the channel (n) output."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(COMP1W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: 
bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 9; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DECAPEN1`"] pub enum DECAPEN1W { #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] _0, #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] _1, } impl DECAPEN1W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DECAPEN1W::_0 => false, DECAPEN1W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DECAPEN1W<'a> { w: &'a mut W, } impl<'a> _DECAPEN1W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DECAPEN1W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DECAPEN1W::_0) } #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DECAPEN1W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 10; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DECAP1`"] pub enum DECAP1W { #[doc = "The dual edge captures are inactive."] _0, #[doc = "The dual edge captures are active."] _1, } impl DECAP1W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DECAP1W::_0 => false, DECAP1W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DECAP1W<'a> { w: &'a mut W, } impl<'a> _DECAP1W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DECAP1W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The dual edge captures are inactive."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DECAP1W::_0) } #[doc = "The dual edge captures are active."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DECAP1W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 11; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DTEN1`"] pub enum DTEN1W { #[doc = "The deadtime insertion in this pair of channels is disabled."] _0, #[doc = "The deadtime insertion in this pair of channels is enabled."] _1, } impl DTEN1W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DTEN1W::_0 => false, DTEN1W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DTEN1W<'a> { w: &'a mut W, } impl<'a> _DTEN1W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DTEN1W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The deadtime insertion in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DTEN1W::_0) } #[doc = "The deadtime insertion in this pair of channels is 
enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DTEN1W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 12; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SYNCEN1`"] pub enum SYNCEN1W { #[doc = "The PWM synchronization in this pair of channels is disabled."] _0, #[doc = "The PWM synchronization in this pair of channels is enabled."] _1, } impl SYNCEN1W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SYNCEN1W::_0 => false, SYNCEN1W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SYNCEN1W<'a> { w: &'a mut W, } impl<'a> _SYNCEN1W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SYNCEN1W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The PWM synchronization in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SYNCEN1W::_0) } #[doc = "The PWM synchronization in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SYNCEN1W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 13; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `FAULTEN1`"] pub enum FAULTEN1W { #[doc = "The fault control in this pair of channels is disabled."] _0, #[doc = "The fault control in this pair of channels is enabled."] _1, } impl FAULTEN1W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { FAULTEN1W::_0 => false, FAULTEN1W::_1 => true, } } } #[doc = r" Proxy"] pub struct _FAULTEN1W<'a> { w: &'a mut W, } impl<'a> _FAULTEN1W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: FAULTEN1W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The fault control in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(FAULTEN1W::_0) } #[doc = "The fault control in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(FAULTEN1W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 14; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `COMBINE2`"] pub enum COMBINE2W { #[doc = "Channels (n) and (n+1) are independent."] _0, #[doc = "Channels (n) and (n+1) are combined."] _1, } impl COMBINE2W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { COMBINE2W::_0 => false, COMBINE2W::_1 => true, } } } #[doc = r" Proxy"] pub struct _COMBINE2W<'a> { w: 
&'a mut W, } impl<'a> _COMBINE2W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: COMBINE2W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Channels (n) and (n+1) are independent."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(COMBINE2W::_0) } #[doc = "Channels (n) and (n+1) are combined."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(COMBINE2W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 16; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `COMP2`"] pub enum COMP2W { #[doc = "The channel (n+1) output is the same as the channel (n) output."] _0, #[doc = "The channel (n+1) output is the complement of the channel (n) output."] _1, } impl COMP2W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { COMP2W::_0 => false, COMP2W::_1 => true, } } } #[doc = r" Proxy"] pub struct _COMP2W<'a> { w: &'a mut W, } impl<'a> _COMP2W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: COMP2W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The channel (n+1) output is the same as the channel (n) output."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(COMP2W::_0) } #[doc = "The channel (n+1) output is the complement of the channel (n) output."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(COMP2W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 17; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DECAPEN2`"] pub enum DECAPEN2W { #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] _0, #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] _1, } impl DECAPEN2W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DECAPEN2W::_0 => false, DECAPEN2W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DECAPEN2W<'a> { w: &'a mut W, } impl<'a> _DECAPEN2W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DECAPEN2W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DECAPEN2W::_0) } #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DECAPEN2W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 18; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that 
can be written to the field `DECAP2`"] pub enum DECAP2W { #[doc = "The dual edge captures are inactive."] _0, #[doc = "The dual edge captures are active."] _1, } impl DECAP2W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DECAP2W::_0 => false, DECAP2W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DECAP2W<'a> { w: &'a mut W, } impl<'a> _DECAP2W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DECAP2W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The dual edge captures are inactive."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DECAP2W::_0) } #[doc = "The dual edge captures are active."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DECAP2W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 19; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DTEN2`"] pub enum DTEN2W { #[doc = "The deadtime insertion in this pair of channels is disabled."] _0, #[doc = "The deadtime insertion in this pair of channels is enabled."] _1, } impl DTEN2W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DTEN2W::_0 => false, DTEN2W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DTEN2W<'a> { w: &'a mut W, } impl<'a> _DTEN2W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DTEN2W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The deadtime insertion in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DTEN2W::_0) } #[doc = "The deadtime insertion in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DTEN2W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 20; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SYNCEN2`"] pub enum SYNCEN2W { #[doc = "The PWM synchronization in this pair of channels is disabled."] _0, #[doc = "The PWM synchronization in this pair of channels is enabled."] _1, } impl SYNCEN2W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SYNCEN2W::_0 => false, SYNCEN2W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SYNCEN2W<'a> { w: &'a mut W, } impl<'a> _SYNCEN2W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SYNCEN2W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The PWM synchronization in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SYNCEN2W::_0) } #[doc = "The PWM synchronization in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SYNCEN2W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) 
-> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 21; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `FAULTEN2`"] pub enum FAULTEN2W { #[doc = "The fault control in this pair of channels is disabled."] _0, #[doc = "The fault control in this pair of channels is enabled."] _1, } impl FAULTEN2W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { FAULTEN2W::_0 => false, FAULTEN2W::_1 => true, } } } #[doc = r" Proxy"] pub struct _FAULTEN2W<'a> { w: &'a mut W, } impl<'a> _FAULTEN2W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: FAULTEN2W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The fault control in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(FAULTEN2W::_0) } #[doc = "The fault control in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(FAULTEN2W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 22; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `COMBINE3`"] pub enum COMBINE3W { #[doc = "Channels (n) and (n+1) are independent."] _0, #[doc = "Channels (n) and (n+1) are combined."] _1, } impl COMBINE3W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { COMBINE3W::_0 => false, COMBINE3W::_1 => true, } } } #[doc = r" Proxy"] pub struct _COMBINE3W<'a> { w: &'a mut W, } impl<'a> _COMBINE3W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: COMBINE3W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Channels (n) and (n+1) are independent."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(COMBINE3W::_0) } #[doc = "Channels (n) and (n+1) are combined."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(COMBINE3W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 24; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `COMP3`"] pub enum COMP3W { #[doc = "The channel (n+1) output is the same as the channel (n) output."] _0, #[doc = "The channel (n+1) output is the complement of the channel (n) output."] _1, } impl COMP3W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { COMP3W::_0 => false, COMP3W::_1 => true, } } } #[doc = r" Proxy"] pub struct _COMP3W<'a> { w: &'a mut W, } impl<'a> _COMP3W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: COMP3W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The channel (n+1) output is the same as the channel (n) output."] #[inline] pub 
fn _0(self) -> &'a mut W { self.variant(COMP3W::_0) } #[doc = "The channel (n+1) output is the complement of the channel (n) output."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(COMP3W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 25; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DECAPEN3`"] pub enum DECAPEN3W { #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] _0, #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] _1, } impl DECAPEN3W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DECAPEN3W::_0 => false, DECAPEN3W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DECAPEN3W<'a> { w: &'a mut W, } impl<'a> _DECAPEN3W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DECAPEN3W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The Dual Edge Capture mode in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DECAPEN3W::_0) } #[doc = "The Dual Edge Capture mode in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DECAPEN3W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 26; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DECAP3`"] pub enum DECAP3W { #[doc = "The dual edge captures are inactive."] _0, #[doc = "The dual edge captures are active."] _1, } impl DECAP3W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DECAP3W::_0 => false, DECAP3W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DECAP3W<'a> { w: &'a mut W, } impl<'a> _DECAP3W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DECAP3W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The dual edge captures are inactive."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DECAP3W::_0) } #[doc = "The dual edge captures are active."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DECAP3W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 27; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DTEN3`"] pub enum DTEN3W { #[doc = "The deadtime insertion in this pair of channels is disabled."] _0, #[doc = "The deadtime insertion in this pair of channels is enabled."] _1, } impl DTEN3W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DTEN3W::_0 => 
false, DTEN3W::_1 => true, } } } #[doc = r" Proxy"] pub struct _DTEN3W<'a> { w: &'a mut W, } impl<'a> _DTEN3W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DTEN3W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The deadtime insertion in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DTEN3W::_0) } #[doc = "The deadtime insertion in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DTEN3W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 28; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SYNCEN3`"] pub enum SYNCEN3W { #[doc = "The PWM synchronization in this pair of channels is disabled."] _0, #[doc = "The PWM synchronization in this pair of channels is enabled."] _1, } impl SYNCEN3W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SYNCEN3W::_0 => false, SYNCEN3W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SYNCEN3W<'a> { w: &'a mut W, } impl<'a> _SYNCEN3W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SYNCEN3W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The PWM synchronization in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SYNCEN3W::_0) } #[doc = "The PWM synchronization in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SYNCEN3W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 29; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `FAULTEN3`"] pub enum FAULTEN3W { #[doc = "The fault control in this pair of channels is disabled."] _0, #[doc = "The fault control in this pair of channels is enabled."] _1, } impl FAULTEN3W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { FAULTEN3W::_0 => false, FAULTEN3W::_1 => true, } } } #[doc = r" Proxy"] pub struct _FAULTEN3W<'a> { w: &'a mut W, } impl<'a> _FAULTEN3W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: FAULTEN3W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "The fault control in this pair of channels is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(FAULTEN3W::_0) } #[doc = "The fault control in this pair of channels is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(FAULTEN3W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 30; self.w.bits &= !((MASK as u32) << OFFSET); 
self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bit 0 - Combine Channels For n = 0"] #[inline] pub fn combine0(&self) -> COMBINE0R { COMBINE0R::_from({ const MASK: bool = true; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 1 - Complement Of Channel (n) For n = 0"] #[inline] pub fn comp0(&self) -> COMP0R { COMP0R::_from({ const MASK: bool = true; const OFFSET: u8 = 1; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 2 - Dual Edge Capture Mode Enable For n = 0"] #[inline] pub fn decapen0(&self) -> DECAPEN0R { DECAPEN0R::_from({ const MASK: bool = true; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 3 - Dual Edge Capture Mode Captures For n = 0"] #[inline] pub fn decap0(&self) -> DECAP0R { DECAP0R::_from({ const MASK: bool = true; const OFFSET: u8 = 3; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 4 - Deadtime Enable For n = 0"] #[inline] pub fn dten0(&self) -> DTEN0R { DTEN0R::_from({ const MASK: bool = true; const OFFSET: u8 = 4; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 5 - Synchronization Enable For n = 0"] #[inline] pub fn syncen0(&self) -> SYNCEN0R { SYNCEN0R::_from({ const MASK: bool = true; const OFFSET: u8 = 5; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 6 - Fault Control Enable For n = 0"] #[inline] pub fn faulten0(&self) -> FAULTEN0R { FAULTEN0R::_from({ const MASK: bool = true; const OFFSET: u8 = 6; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 8 - Combine Channels For n = 2"] #[inline] pub fn combine1(&self) -> COMBINE1R { COMBINE1R::_from({ const MASK: bool = true; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 9 - Complement Of Channel (n) For n = 2"] #[inline] pub fn comp1(&self) -> COMP1R { COMP1R::_from({ const MASK: bool = true; const OFFSET: u8 = 9; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 10 - Dual Edge Capture Mode Enable For n = 2"] #[inline] pub fn decapen1(&self) -> DECAPEN1R { DECAPEN1R::_from({ const MASK: bool = true; const OFFSET: u8 = 10; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 11 - Dual Edge Capture Mode Captures For n = 2"] #[inline] pub fn decap1(&self) -> DECAP1R { DECAP1R::_from({ const MASK: bool = true; const OFFSET: u8 = 11; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 12 - Deadtime Enable For n = 2"] #[inline] pub fn dten1(&self) -> DTEN1R { DTEN1R::_from({ const MASK: bool = true; const OFFSET: u8 = 12; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 13 - Synchronization Enable For n = 2"] #[inline] pub fn syncen1(&self) -> SYNCEN1R { SYNCEN1R::_from({ const MASK: bool = true; const OFFSET: u8 = 13; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 14 - Fault Control Enable For n = 2"] #[inline] pub fn faulten1(&self) -> FAULTEN1R { FAULTEN1R::_from({ const MASK: bool = true; const OFFSET: u8 = 14; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 16 - Combine Channels For n = 4"] #[inline] pub fn combine2(&self) -> COMBINE2R { COMBINE2R::_from({ const MASK: bool = true; const OFFSET: u8 = 16; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 17 - Complement Of Channel (n) For n = 4"] #[inline] pub fn comp2(&self) -> COMP2R { COMP2R::_from({ const MASK: bool = true; const OFFSET: u8 = 17; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = 
"Bit 18 - Dual Edge Capture Mode Enable For n = 4"] #[inline] pub fn decapen2(&self) -> DECAPEN2R { DECAPEN2R::_from({ const MASK: bool = true; const OFFSET: u8 = 18; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 19 - Dual Edge Capture Mode Captures For n = 4"] #[inline] pub fn decap2(&self) -> DECAP2R { DECAP2R::_from({ const MASK: bool = true; const OFFSET: u8 = 19; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 20 - Deadtime Enable For n = 4"] #[inline] pub fn dten2(&self) -> DTEN2R { DTEN2R::_from({ const MASK: bool = true; const OFFSET: u8 = 20; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 21 - Synchronization Enable For n = 4"] #[inline] pub fn syncen2(&self) -> SYNCEN2R { SYNCEN2R::_from({ const MASK: bool = true; const OFFSET: u8 = 21; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 22 - Fault Control Enable For n = 4"] #[inline] pub fn faulten2(&self) -> FAULTEN2R { FAULTEN2R::_from({ const MASK: bool = true; const OFFSET: u8 = 22; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 24 - Combine Channels For n = 6"] #[inline] pub fn combine3(&self) -> COMBINE3R { COMBINE3R::_from({ const MASK: bool = true; const OFFSET: u8 = 24; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 25 - Complement Of Channel (n) for n = 6"] #[inline] pub fn comp3(&self) -> COMP3R { COMP3R::_from({ const MASK: bool = true; const OFFSET: u8 = 25; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 26 - Dual Edge Capture Mode Enable For n = 6"] #[inline] pub fn decapen3(&self) -> DECAPEN3R { DECAPEN3R::_from({ const MASK: bool = true; const OFFSET: u8 = 26; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 27 - Dual Edge Capture Mode Captures For n = 6"] #[inline] pub fn decap3(&self) -> DECAP3R { DECAP3R::_from({ const MASK: bool = true; const OFFSET: u8 = 27; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 28 - Deadtime Enable For n = 6"] #[inline] pub fn dten3(&self) -> DTEN3R { DTEN3R::_from({ const MASK: bool = true; const OFFSET: u8 = 28; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 29 - Synchronization Enable For n = 6"] #[inline] pub fn syncen3(&self) -> SYNCEN3R { SYNCEN3R::_from({ const MASK: bool = true; const OFFSET: u8 = 29; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 30 - Fault Control Enable For n = 6"] #[inline] pub fn faulten3(&self) -> FAULTEN3R { FAULTEN3R::_from({ const MASK: bool = true; const OFFSET: u8 = 30; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bit 0 - Combine Channels For n = 0"] #[inline] pub fn combine0(&mut self) -> _COMBINE0W { _COMBINE0W { w: self } } #[doc = "Bit 1 - Complement Of Channel (n) For n = 0"] #[inline] pub fn comp0(&mut self) -> _COMP0W { _COMP0W { w: self } } #[doc = "Bit 2 - Dual Edge Capture Mode Enable For n = 0"] #[inline] pub fn decapen0(&mut self) -> _DECAPEN0W { _DECAPEN0W { w: self } } #[doc = "Bit 3 - Dual Edge Capture Mode Captures For n = 0"] #[inline] pub fn decap0(&mut self) -> _DECAP0W { _DECAP0W { w: self } } #[doc = "Bit 4 - Deadtime Enable For n = 0"] #[inline] pub fn dten0(&mut self) -> _DTEN0W { _DTEN0W { w: self } } #[doc = "Bit 5 - Synchronization Enable For n = 0"] #[inline] pub fn syncen0(&mut self) -> _SYNCEN0W { _SYNCEN0W { w: self } } 
#[doc = "Bit 6 - Fault Control Enable For n = 0"] #[inline] pub fn faulten0(&mut self) -> _FAULTEN0W { _FAULTEN0W { w: self } } #[doc = "Bit 8 - Combine Channels For n = 2"] #[inline] pub fn combine1(&mut self) -> _COMBINE1W { _COMBINE1W { w: self } } #[doc = "Bit 9 - Complement Of Channel (n) For n = 2"] #[inline] pub fn comp1(&mut self) -> _COMP1W { _COMP1W { w: self } } #[doc = "Bit 10 - Dual Edge Capture Mode Enable For n = 2"] #[inline] pub fn decapen1(&mut self) -> _DECAPEN1W { _DECAPEN1W { w: self } } #[doc = "Bit 11 - Dual Edge Capture Mode Captures For n = 2"] #[inline] pub fn decap1(&mut self) -> _DECAP1W { _DECAP1W { w: self } } #[doc = "Bit 12 - Deadtime Enable For n = 2"] #[inline] pub fn dten1(&mut self) -> _DTEN1W { _DTEN1W { w: self } } #[doc = "Bit 13 - Synchronization Enable For n = 2"] #[inline] pub fn syncen1(&mut self) -> _SYNCEN1W { _SYNCEN1W { w: self } } #[doc = "Bit 14 - Fault Control Enable For n = 2"] #[inline] pub fn faulten1(&mut self) -> _FAULTEN1W { _FAULTEN1W { w: self } } #[doc = "Bit 16 - Combine Channels For n = 4"] #[inline] pub fn combine2(&mut self) -> _COMBINE2W { _COMBINE2W { w: self } } #[doc = "Bit 17 - Complement Of Channel (n) For n = 4"] #[inline] pub fn comp2(&mut self) -> _COMP2W { _COMP2W { w: self } } #[doc = "Bit 18 - Dual Edge Capture Mode Enable For n = 4"] #[inline] pub fn decapen2(&mut self) -> _DECAPEN2W { _DECAPEN2W { w: self } } #[doc = "Bit 19 - Dual Edge Capture Mode Captures For n = 4"] #[inline] pub fn decap2(&mut self) -> _DECAP2W { _DECAP2W { w: self } } #[doc = "Bit 20 - Deadtime Enable For n = 4"] #[inline] pub fn dten2(&mut self) -> _DTEN2W { _DTEN2W { w: self } } #[doc = "Bit 21 - Synchronization Enable For n = 4"] #[inline] pub fn syncen2(&mut self) -> _SYNCEN2W { _SYNCEN2W { w: self } } #[doc = "Bit 22 - Fault Control Enable For n = 4"] #[inline] pub fn faulten2(&mut self) -> _FAULTEN2W { _FAULTEN2W { w: self } } #[doc = "Bit 24 - Combine Channels For n = 6"] #[inline] pub fn combine3(&mut self) -> _COMBINE3W { _COMBINE3W { w: self } } #[doc = "Bit 25 - Complement Of Channel (n) for n = 6"] #[inline] pub fn comp3(&mut self) -> _COMP3W { _COMP3W { w: self } } #[doc = "Bit 26 - Dual Edge Capture Mode Enable For n = 6"] #[inline] pub fn decapen3(&mut self) -> _DECAPEN3W { _DECAPEN3W { w: self } } #[doc = "Bit 27 - Dual Edge Capture Mode Captures For n = 6"] #[inline] pub fn decap3(&mut self) -> _DECAP3W { _DECAP3W { w: self } } #[doc = "Bit 28 - Deadtime Enable For n = 6"] #[inline] pub fn dten3(&mut self) -> _DTEN3W { _DTEN3W { w: self } } #[doc = "Bit 29 - Synchronization Enable For n = 6"] #[inline] pub fn syncen3(&mut self) -> _SYNCEN3W { _SYNCEN3W { w: self } } #[doc = "Bit 30 - Fault Control Enable For n = 6"] #[inline] pub fn faulten3(&mut self) -> _FAULTEN3W { _FAULTEN3W { w: self } } }
{ !self.bit() }
hyperscaler.go
package hyperscaler import ( "fmt" "strings" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) type Platform string const ( Gke Platform = "GKE" Aks Platform = "AKS" GardenerAzure Platform = "gardenerAzure" GardenerGcp Platform = "gardenerGcp" UnknownGardener Platform = "unknownGardener" Unknown Platform = "unknown" ) const ( shootCmNamespace = "kube-config" shootCmName = "shoot-info" ) func extractHyperScalerFromCm(configmap corev1.ConfigMap) (Platform, error) { providerKey := "provider" provider, ok := configmap.Data[providerKey] if !ok { return UnknownGardener, fmt.Errorf("%s configmap in namespace %s is malformed, there's no %s key", shootCmName, shootCmNamespace, providerKey) } switch provider { case "azure": return GardenerAzure, nil case "gcp": return GardenerGcp, nil
func extractHyperScalerFromNode(node corev1.Node) Platform { if strings.HasPrefix(node.Name, "gke") { return Gke } if strings.HasPrefix(node.Name, "aks") { return Aks } return Unknown } func GetHyperScalerPlatform(clientset *kubernetes.Clientset) (Platform, error) { cm, err := clientset.CoreV1().ConfigMaps(shootCmNamespace).Get(shootCmName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return "", err } if err != nil && apierrors.IsNotFound(err) { nodes, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return "", err } return extractHyperScalerFromNode(nodes.Items[0]), nil } return extractHyperScalerFromCm(*cm) }
default: return UnknownGardener, nil } }
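A minimal caller sketch for the detection flow above; the in-cluster wiring and the import path are assumptions for illustration, not part of hyperscaler.go:

package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	// hypothetical import path for the hyperscaler package above
	"example.com/project/hyperscaler"
)

func main() {
	// Build an in-cluster client; any *kubernetes.Clientset works here.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// Reads the shoot-info ConfigMap first and falls back to node name prefixes.
	platform, err := hyperscaler.GetHyperScalerPlatform(clientset)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("detected platform: %s\n", platform)
}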
NotFound.js
import React from 'react';
const NotFound = () => { return ( <div> <h1>Not Found</h1> <p className='lead'>The page you are looking for does not exist.</p> </div> ); }; export default NotFound;
map_test.go
package treemap import ( "fmt" "testing" "github.com/stretchr/testify/require" ) var testData1 = []string{ "..##.......", "#...#...#..", ".#....#..#.", "..#.#...#.#", ".#...##..#.", "..#.##.....", ".#.#.#....#", ".#........#", "#.##...#...", "#...##....#", ".#..#...#.#", } func
(t *testing.T) { m := ParseTreeMap(testData1) trees := m.CountTrees(0, 1) require.Equal(t, 3, trees) trees = m.CountTrees(1, 1) require.Equal(t, 2, trees) trees = m.CountTrees(0, 2) require.Equal(t, 1, trees) } func TestParseTreeMap(t *testing.T) { m := ParseTreeMap(testData1) require.Equal(t, OpenGround, MapElement(m.data[0][0])) require.Equal(t, Tree, MapElement(m.data[0][2])) require.Equal(t, 11, m.mapWidth) } func TestElementAt(t *testing.T) { m := ParseTreeMap(testData1) require.Equal(t, OpenGround, m.ElementAt(0, 0)) require.Equal(t, Tree, m.ElementAt(2, 0)) require.Equal(t, Tree, m.ElementAt(13, 0)) } func TestMultipleSlopes(t *testing.T) { tm := ParseTreeMap(testData1) treeCounts := []int{} treeCounts = append(treeCounts, tm.CountTrees(1, 1)) treeCounts = append(treeCounts, tm.CountTrees(3, 1)) treeCounts = append(treeCounts, tm.CountTrees(5, 1)) treeCounts = append(treeCounts, tm.CountTrees(7, 1)) treeCounts = append(treeCounts, tm.CountTrees(1, 2)) total := 1 for _, count := range treeCounts { total = total * count } fmt.Printf("trees: %v\n", treeCounts) require.Equal(t, 336, total) }
TestCountTrees
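The tests above pin down a ParseTreeMap/ElementAt/CountTrees API whose implementation is not included in this record. The following is a hypothetical sketch consistent with those tests (the grid repeats horizontally, so column lookups wrap with x modulo the row width); it is an illustration, not the actual treemap package:

package treemap

type MapElement rune

const (
	OpenGround MapElement = '.'
	Tree       MapElement = '#'
)

type TreeMap struct {
	data     []string
	mapWidth int
}

// ParseTreeMap builds a TreeMap from the input rows.
func ParseTreeMap(rows []string) *TreeMap {
	return &TreeMap{data: rows, mapWidth: len(rows[0])}
}

// ElementAt returns the element at (x, y); x wraps around the row width.
func (m *TreeMap) ElementAt(x, y int) MapElement {
	return MapElement(m.data[y][x%m.mapWidth])
}

// CountTrees walks the slope (right, down) from the top-left corner and
// counts how many trees are hit before running off the bottom of the map.
func (m *TreeMap) CountTrees(right, down int) int {
	trees := 0
	for x, y := 0, 0; y < len(m.data); x, y = x+right, y+down {
		if m.ElementAt(x, y) == Tree {
			trees++
		}
	}
	return trees
}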
startQiskit1590.py
# qubit number=5 # total number=50 import cirq import qiskit from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister from qiskit import BasicAer, execute, transpile from pprint import pprint from qiskit.test.mock import FakeVigo from math import log2,floor, sqrt, pi import numpy as np import networkx as nx def build_oracle(n: int, f) -> QuantumCircuit: # implement the oracle O_f^\pm # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate # or multi_control_Z_gate (issue #127) controls = QuantumRegister(n, "ofc") oracle = QuantumCircuit(controls, name="Zf") for i in range(2 ** n): rep = np.binary_repr(i, n) if f(rep) == "1": for j in range(n): if rep[j] == "0": oracle.x(controls[j]) # oracle.h(controls[n]) if n >= 2: oracle.mcu1(pi, controls[1:], controls[0]) for j in range(n): if rep[j] == "0": oracle.x(controls[j]) # oracle.barrier() return oracle def make_circuit(n:int,f) -> QuantumCircuit: # circuit begin input_qubit = QuantumRegister(n,"qc") classical = ClassicalRegister(n, "qm") prog = QuantumCircuit(input_qubit, classical) prog.h(input_qubit[0]) # number=3 prog.h(input_qubit[1]) # number=4 prog.h(input_qubit[2]) # number=5 prog.h(input_qubit[3]) # number=6 prog.h(input_qubit[4]) # number=21 prog.h(input_qubit[0]) # number=44 prog.cz(input_qubit[3],input_qubit[0]) # number=45 prog.h(input_qubit[0]) # number=46 prog.cx(input_qubit[3],input_qubit[0]) # number=47 prog.z(input_qubit[3]) # number=48 prog.cx(input_qubit[3],input_qubit[0]) # number=49 prog.cx(input_qubit[3],input_qubit[0]) # number=34 prog.rx(0.11938052083641225,input_qubit[1]) # number=36 Zf = build_oracle(n, f) repeat = floor(sqrt(2 ** n) * pi / 4) for i in range(repeat): prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)]) prog.h(input_qubit[0]) # number=1 prog.rx(1.4765485471872026,input_qubit[2]) # number=35 prog.h(input_qubit[1]) # number=2 prog.h(input_qubit[2]) # number=7 prog.h(input_qubit[3]) # number=8 prog.cx(input_qubit[1],input_qubit[0]) # number=41 prog.x(input_qubit[0]) # number=42 prog.cx(input_qubit[1],input_qubit[0]) # number=43 prog.x(input_qubit[4]) # number=30 prog.x(input_qubit[1]) # number=10 prog.x(input_qubit[2]) # number=11 prog.rx(0.45238934211692994,input_qubit[3]) # number=38 prog.y(input_qubit[1]) # number=39 prog.rx(-2.5258404934861938,input_qubit[1]) # number=25 prog.h(input_qubit[3]) # number=29 prog.cx(input_qubit[0],input_qubit[3]) # number=22 prog.x(input_qubit[3]) # number=23 prog.cx(input_qubit[0],input_qubit[3]) # number=24 if n>=2: prog.mcu1(pi,input_qubit[1:],input_qubit[0]) prog.x(input_qubit[0]) # number=13 prog.rx(-0.0722566310325653,input_qubit[4]) # number=37 prog.x(input_qubit[1]) # number=14 prog.cx(input_qubit[0],input_qubit[2]) # number=26 prog.x(input_qubit[2]) # number=27 prog.h(input_qubit[4]) # number=40 prog.cx(input_qubit[0],input_qubit[2]) # number=28 prog.x(input_qubit[3]) # number=16 prog.h(input_qubit[0]) # number=17 prog.h(input_qubit[1]) # number=18 prog.h(input_qubit[2]) # number=19 prog.h(input_qubit[3]) # number=20 # circuit end for i in range(n): prog.measure(input_qubit[i], classical[i]) return prog if __name__ == '__main__': key = "00000" f = lambda rep: str(int(rep == key)) prog = make_circuit(5,f) backend = BasicAer.get_backend('qasm_simulator') sample_shot =7924
writefile = open("../data/startQiskit1590.csv","w") print(info,file=writefile) print("results end", file=writefile) print(circuit1.depth(),file=writefile) print(circuit1,file=writefile) writefile.close()
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts() backend = FakeVigo() circuit1 = transpile(prog,backend,optimization_level=2)
repository.go
package client import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "strconv" "time" "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution" "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/context" "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/digest" "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/manifest" "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2" "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport" "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache" "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory" ) // Registry provides an interface for calling Repositories, which returns a catalog of repositories. type Registry interface { Repositories(ctx context.Context, repos []string, last string) (n int, err error) } // NewRegistry creates a registry namespace which can be used to get a listing of repositories func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { ub, err := v2.NewURLBuilderFromString(baseURL) if err != nil { return nil, err } client := &http.Client{ Transport: transport, Timeout: 1 * time.Minute, } return &registry{ client: client, ub: ub, context: ctx, }, nil } type registry struct { client *http.Client ub *v2.URLBuilder context context.Context } // Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size // of the slice, starting at the value provided in 'last'. 
The number of entries will be returned along with io.EOF if there // are no more entries func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { var numFilled int var returnErr error values := buildCatalogValues(len(entries), last) u, err := r.ub.BuildCatalogURL(values) if err != nil { return 0, err } resp, err := r.client.Get(u) if err != nil { return 0, err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { var ctlg struct { Repositories []string `json:"repositories"` } decoder := json.NewDecoder(resp.Body) if err := decoder.Decode(&ctlg); err != nil { return 0, err } for cnt := range ctlg.Repositories { entries[cnt] = ctlg.Repositories[cnt] } numFilled = len(ctlg.Repositories) link := resp.Header.Get("Link") if link == "" { returnErr = io.EOF } } else { return 0, handleErrorResponse(resp) } return numFilled, returnErr } // NewRepository creates a new Repository for the given repository name and base URL func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { if err := v2.ValidateRepositoryName(name); err != nil { return nil, err } ub, err := v2.NewURLBuilderFromString(baseURL) if err != nil { return nil, err } client := &http.Client{ Transport: transport, // TODO(dmcgowan): create cookie jar } return &repository{ client: client, ub: ub, name: name, context: ctx, }, nil } type repository struct { client *http.Client ub *v2.URLBuilder context context.Context name string } func (r *repository) Name() string { return r.name } func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { statter := &blobStatter{ name: r.Name(), ub: r.ub, client: r.client, } return &blobs{ name: r.Name(), ub: r.ub, client: r.client, statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), } } func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { // todo(richardscothern): options should be sent over the wire return &manifests{ name: r.Name(), ub: r.ub, client: r.client, etags: make(map[string]string), }, nil } func (r *repository) Signatures() distribution.SignatureService { ms, _ := r.Manifests(r.context) return &signatures{ manifests: ms, } } type signatures struct { manifests distribution.ManifestService } func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) { m, err := s.manifests.Get(dgst) if err != nil { return nil, err } return m.Signatures() } func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error { panic("not implemented") } type manifests struct { name string ub *v2.URLBuilder client *http.Client etags map[string]string } func (ms *manifests) Tags() ([]string, error) { u, err := ms.ub.BuildTagsURL(ms.name) if err != nil { return nil, err } resp, err := ms.client.Get(u) if err != nil { return nil, err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { b, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } tagsResponse := struct { Tags []string `json:"tags"` }{} if err := json.Unmarshal(b, &tagsResponse); err != nil { return nil, err } return tagsResponse.Tags, nil } else if resp.StatusCode == http.StatusNotFound { return nil, nil } return nil, handleErrorResponse(resp) } func (ms *manifests) Exists(dgst digest.Digest) (bool, error) { // Call by Tag endpoint since the API uses the same // URL endpoint for tags and digests. 
return ms.ExistsByTag(dgst.String()) } func (ms *manifests) ExistsByTag(tag string) (bool, error) { u, err := ms.ub.BuildManifestURL(ms.name, tag) if err != nil { return false, err } resp, err := ms.client.Head(u) if err != nil { return false, err } if SuccessStatus(resp.StatusCode) { return true, nil } else if resp.StatusCode == http.StatusNotFound { return false, nil } return false, handleErrorResponse(resp) } func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { // Call by Tag endpoint since the API uses the same // URL endpoint for tags and digests. return ms.GetByTag(dgst.String()) } // AddEtagToTag allows a client to supply an eTag to GetByTag which will be // used for a conditional HTTP request. If the eTag matches, a nil manifest // and nil error will be returned. etag is automatically quoted when added to // this map. func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { return func(ms distribution.ManifestService) error { if ms, ok := ms.(*manifests); ok { ms.etags[tag] = fmt.Sprintf(`"%s"`, etag) return nil } return fmt.Errorf("etag options is a client-only option") } } func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { for _, option := range options { err := option(ms) if err != nil { return nil, err } } u, err := ms.ub.BuildManifestURL(ms.name, tag) if err != nil { return nil, err } req, err := http.NewRequest("GET", u, nil) if err != nil { return nil, err } if _, ok := ms.etags[tag]; ok { req.Header.Set("eTag", ms.etags[tag]) } resp, err := ms.client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode == http.StatusNotModified { return nil, nil } else if SuccessStatus(resp.StatusCode) { var sm manifest.SignedManifest decoder := json.NewDecoder(resp.Body) if err := decoder.Decode(&sm); err != nil { return nil, err } return &sm, nil } return nil, handleErrorResponse(resp) } func (ms *manifests) Put(m *manifest.SignedManifest) error { manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag) if err != nil { return err } // todo(richardscothern): do something with options here when they become applicable putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw)) if err != nil { return err } resp, err := ms.client.Do(putRequest) if err != nil { return err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { // TODO(dmcgowan): make use of digest header return nil } return handleErrorResponse(resp) } func (ms *manifests) Delete(dgst digest.Digest) error { u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) if err != nil { return err } req, err := http.NewRequest("DELETE", u, nil) if err != nil { return err } resp, err := ms.client.Do(req) if err != nil { return err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { return nil } return handleErrorResponse(resp) } type blobs struct { name string ub *v2.URLBuilder client *http.Client statter distribution.BlobDescriptorService distribution.BlobDeleter } func
(location, source string) (string, error) { locationURL, err := url.Parse(location) if err != nil { return "", err } if locationURL.Scheme == "" { sourceURL, err := url.Parse(source) if err != nil { return "", err } locationURL = &url.URL{ Scheme: sourceURL.Scheme, Host: sourceURL.Host, Path: location, } location = locationURL.String() } return location, nil } func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { return bs.statter.Stat(ctx, dgst) } func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { desc, err := bs.Stat(ctx, dgst) if err != nil { return nil, err } reader, err := bs.Open(ctx, desc.Digest) if err != nil { return nil, err } defer reader.Close() return ioutil.ReadAll(reader) } func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { stat, err := bs.statter.Stat(ctx, dgst) if err != nil { return nil, err } blobURL, err := bs.ub.BuildBlobURL(bs.name, stat.Digest) if err != nil { return nil, err } return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Size), nil } func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { panic("not implemented") } func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { writer, err := bs.Create(ctx) if err != nil { return distribution.Descriptor{}, err } dgstr := digest.Canonical.New() n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) if err != nil { return distribution.Descriptor{}, err } if n < int64(len(p)) { return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) } desc := distribution.Descriptor{ MediaType: mediaType, Size: int64(len(p)), Digest: dgstr.Digest(), } return writer.Commit(ctx, desc) } func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { u, err := bs.ub.BuildBlobUploadURL(bs.name) resp, err := bs.client.Post(u, "", nil) if err != nil { return nil, err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { // TODO(dmcgowan): Check for invalid UUID uuid := resp.Header.Get("Docker-Upload-UUID") location, err := sanitizeLocation(resp.Header.Get("Location"), u) if err != nil { return nil, err } return &httpBlobUpload{ statter: bs.statter, client: bs.client, uuid: uuid, startedAt: time.Now(), location: location, }, nil } return nil, handleErrorResponse(resp) } func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { panic("not implemented") } func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { return bs.statter.Clear(ctx, dgst) } type blobStatter struct { name string ub *v2.URLBuilder client *http.Client } func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { u, err := bs.ub.BuildBlobURL(bs.name, dgst) if err != nil { return distribution.Descriptor{}, err } resp, err := bs.client.Head(u) if err != nil { return distribution.Descriptor{}, err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { lengthHeader := resp.Header.Get("Content-Length") length, err := strconv.ParseInt(lengthHeader, 10, 64) if err != nil { return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) } return distribution.Descriptor{ MediaType: resp.Header.Get("Content-Type"), Size: length, Digest: dgst, }, nil } else if resp.StatusCode == http.StatusNotFound { return distribution.Descriptor{}, 
distribution.ErrBlobUnknown } return distribution.Descriptor{}, handleErrorResponse(resp) } func buildCatalogValues(maxEntries int, last string) url.Values { values := url.Values{} if maxEntries > 0 { values.Add("n", strconv.Itoa(maxEntries)) } if last != "" { values.Add("last", last) } return values } func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) if err != nil { return err } req, err := http.NewRequest("DELETE", blobURL, nil) if err != nil { return err } resp, err := bs.client.Do(req) if err != nil { return err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { return nil } return handleErrorResponse(resp) } func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { return nil }
sanitizeLocation
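The Repositories contract documented above (fill the caller's slice starting after 'last', return the count, and signal the end of the catalog with io.EOF) implies a paging loop like the following sketch; it assumes the same client package and its existing io and context imports, and is illustrative rather than part of the library:

// listAllRepositories is a hypothetical helper that drains the catalog by
// calling Repositories repeatedly until io.EOF is returned.
func listAllRepositories(ctx context.Context, reg Registry) ([]string, error) {
	var all []string
	entries := make([]string, 50) // page size per request
	last := ""
	for {
		n, err := reg.Repositories(ctx, entries, last)
		all = append(all, entries[:n]...)
		if err == io.EOF {
			return all, nil
		}
		if err != nil {
			return nil, err
		}
		if n == 0 {
			return all, nil
		}
		last = entries[n-1]
	}
}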
GaussianRandomStockPrice.py
import pandas as pd import numpy as np import yfinance as yf from sklearn.linear_model import LinearRegression import statsmodels import statsmodels.api as sm import statsmodels.tsa.stattools as ts import datetime import scipy.stats import math import openpyxl as pyxl from scipy import signal from scipy import stats as ss import statistics from finta import TA from filterpy.kalman import KalmanFilter from filterpy.common import Q_discrete_white_noise import pandas_ta as ta from pingouin import gzscore def GaussianRandomStockPrice(mu, sigma, n, end, freq, S0=100):
""" This function randomly creates a stock price series bases on gaussian probabilities. Arguments: ---------- - mu: float The mean parameter - sigma: float The standard déviation parameter - n: int Number of periods - end: datetime date The last date of thé series - freq: pandas frequency string The frequency of thé dataseries: - "D": days - "min": minutes - "s": seconds - S0: float The first stock price Return: ---------- - RStock: Pandas DataFrame Contains thé datetime as index and thé random stock prices in a column """ RStock = np.random.normal(mu, sigma, n).astype("float") RStock = pd.DataFrame(RStock) RStock.rename(inplace=True, columns={RStock.columns[0]: "Return"}) RStock["Price"] = ((1 + RStock["Return"]).cumprod()) * S0 times = pd.date_range(end=end, freq=freq, periods=n) RStock.index = times RStock = pd.DataFrame(RStock["Price"]) return RStock
cr.rs
#[doc = "Reader of register CR"] pub type R = crate::R<u32, super::CR>; #[doc = "Writer for register CR"] pub type W = crate::W<u32, super::CR>; #[doc = "Register CR `reset()`'s with value 0"] impl crate::ResetValue for super::CR { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `ADCAL`"] pub type ADCAL_R = crate::R<bool, bool>; #[doc = "Write proxy for field `ADCAL`"] pub struct ADCAL_W<'a> { w: &'a mut W, } impl<'a> ADCAL_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31); self.w } } #[doc = "Reader of field `ADSTP`"] pub type ADSTP_R = crate::R<bool, bool>; #[doc = "Write proxy for field `ADSTP`"] pub struct ADSTP_W<'a> { w: &'a mut W, } impl<'a> ADSTP_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4); self.w }
} #[doc = "Reader of field `ADSTART`"] pub type ADSTART_R = crate::R<bool, bool>; #[doc = "Write proxy for field `ADSTART`"] pub struct ADSTART_W<'a> { w: &'a mut W, } impl<'a> ADSTART_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2); self.w } } #[doc = "Reader of field `ADDIS`"] pub type ADDIS_R = crate::R<bool, bool>; #[doc = "Write proxy for field `ADDIS`"] pub struct ADDIS_W<'a> { w: &'a mut W, } impl<'a> ADDIS_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1); self.w } } #[doc = "Reader of field `ADEN`"] pub type ADEN_R = crate::R<bool, bool>; #[doc = "Write proxy for field `ADEN`"] pub struct ADEN_W<'a> { w: &'a mut W, } impl<'a> ADEN_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } impl R { #[doc = "Bit 31 - ADC calibration"] #[inline(always)] pub fn adcal(&self) -> ADCAL_R { ADCAL_R::new(((self.bits >> 31) & 0x01) != 0) } #[doc = "Bit 4 - ADC stop conversion command"] #[inline(always)] pub fn adstp(&self) -> ADSTP_R { ADSTP_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 2 - ADC start conversion command"] #[inline(always)] pub fn adstart(&self) -> ADSTART_R { ADSTART_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 1 - ADC disable command"] #[inline(always)] pub fn addis(&self) -> ADDIS_R { ADDIS_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 0 - ADC enable command"] #[inline(always)] pub fn aden(&self) -> ADEN_R { ADEN_R::new((self.bits & 0x01) != 0) } } impl W { #[doc = "Bit 31 - ADC calibration"] #[inline(always)] pub fn adcal(&mut self) -> ADCAL_W { ADCAL_W { w: self } } #[doc = "Bit 4 - ADC stop conversion command"] #[inline(always)] pub fn adstp(&mut self) -> ADSTP_W { ADSTP_W { w: self } } #[doc = "Bit 2 - ADC start conversion command"] #[inline(always)] pub fn adstart(&mut self) -> ADSTART_W { ADSTART_W { w: self } } #[doc = "Bit 1 - ADC disable command"] #[inline(always)] pub fn addis(&mut self) -> ADDIS_W { ADDIS_W { w: self } } #[doc = "Bit 0 - ADC enable command"] #[inline(always)] pub fn aden(&mut self) -> ADEN_W { ADEN_W { w: self } } }
ICassandraClient.ts
import {Observable} from "rxjs"; export interface ICassandraClient { execute(query: IQuery): Observable<any>; paginate(query: IQuery, completions: Observable<string>): Observable<any>; }
export type IQuery = [string, any]
wires.go
// Copyright 2019 Yunion // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package models import ( "context" "database/sql" "fmt" "yunion.io/x/jsonutils" "yunion.io/x/log" "yunion.io/x/pkg/errors" "yunion.io/x/pkg/gotypes" "yunion.io/x/pkg/tristate" "yunion.io/x/pkg/util/compare" "yunion.io/x/pkg/util/netutils" "yunion.io/x/pkg/util/sets" "yunion.io/x/pkg/utils" "yunion.io/x/sqlchemy" "yunion.io/x/onecloud/pkg/apis" api "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/cloudcommon/db" "yunion.io/x/onecloud/pkg/cloudcommon/db/lockman" "yunion.io/x/onecloud/pkg/cloudcommon/db/taskman" "yunion.io/x/onecloud/pkg/cloudcommon/validators" "yunion.io/x/onecloud/pkg/cloudprovider" "yunion.io/x/onecloud/pkg/httperrors" "yunion.io/x/onecloud/pkg/mcclient" "yunion.io/x/onecloud/pkg/util/logclient" "yunion.io/x/onecloud/pkg/util/rbacutils" "yunion.io/x/onecloud/pkg/util/stringutils2" ) type SWireManager struct { db.SInfrasResourceBaseManager db.SExternalizedResourceBaseManager db.SStatusResourceBaseManager SManagedResourceBaseManager SVpcResourceBaseManager SZoneResourceBaseManager } var WireManager *SWireManager func init() { WireManager = &SWireManager{ SInfrasResourceBaseManager: db.NewInfrasResourceBaseManager( SWire{}, "wires_tbl", "wire", "wires", ), } WireManager.SetVirtualObject(WireManager) } type SWire struct { db.SInfrasResourceBase db.SExternalizedResourceBase db.SStatusResourceBase // SManagedResourceBase SVpcResourceBase `wdith:"36" charset:"ascii" nullable:"false" list:"domain" create:"domain_required" update:""` SZoneResourceBase `width:"36" charset:"ascii" nullable:"true" list:"domain" create:"domain_required" update:""` // 带宽大小, 单位Mbps // example: 1000 Bandwidth int `list:"domain" update:"domain" nullable:"false" create:"domain_required" json:"bandwidth"` // MTU // example: 1500 Mtu int `list:"domain" update:"domain" nullable:"false" create:"domain_optional" default:"1500" json:"mtu"` // swagger:ignore ScheduleRank int `list:"domain" update:"domain" json:"schedule_rank"` // 可用区Id // ZoneId string `width:"36" charset:"ascii" nullable:"true" list:"domain" create:"domain_required"` // VPC Id // VpcId string `wdith:"36" charset:"ascii" nullable:"false" list:"domain" create:"domain_required"` } func (manager *SWireManager) GetContextManagers() [][]db.IModelManager { return [][]db.IModelManager{ {ZoneManager}, {VpcManager}, } } func (manager *SWireManager) ValidateCreateData( ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, input api.WireCreateInput, ) (api.WireCreateInput, error) { var err error if input.Bandwidth < 0 { return input, httperrors.NewOutOfRangeError("bandwidth must be greater than 0") } if input.Mtu < 0 || input.Mtu > 1000000 { return input, httperrors.NewOutOfRangeError("mtu must be range of 0~1000000") } if input.VpcId == "" { input.VpcId = api.DEFAULT_VPC_ID } _vpc, err := validators.ValidateModel(userCred, VpcManager, &input.VpcId) if err != nil { return input, err } vpc := _vpc.(*SVpc) if 
len(vpc.ManagerId) > 0 { return input, httperrors.NewNotSupportedError("Currently only kvm platform supports creating wire") } if len(input.ZoneId) == 0 { return input, httperrors.NewMissingParameterError("zone") } _, input.ZoneResourceInput, err = ValidateZoneResourceInput(userCred, input.ZoneResourceInput) if err != nil { return input, errors.Wrap(err, "ValidateZoneResourceInput") } input.InfrasResourceBaseCreateInput, err = manager.SInfrasResourceBaseManager.ValidateCreateData(ctx, userCred, ownerId, query, input.InfrasResourceBaseCreateInput) if err != nil { return input, err } return input, nil } func (wire *SWire) SetStatus(userCred mcclient.TokenCredential, status string, reason string) error { return db.StatusBaseSetStatus(wire, userCred, status, reason) } func (wire *SWire) ValidateUpdateData(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.WireUpdateInput) (api.WireUpdateInput, error) { data := jsonutils.Marshal(input).(*jsonutils.JSONDict) keysV := []validators.IValidator{ validators.NewNonNegativeValidator("bandwidth"), validators.NewRangeValidator("mtu", 1, 1000000).Optional(true), } for _, v := range keysV { v.Optional(true) if err := v.Validate(data); err != nil { return input, err } } var err error input.InfrasResourceBaseUpdateInput, err = wire.SInfrasResourceBase.ValidateUpdateData(ctx, userCred, query, input.InfrasResourceBaseUpdateInput) if err != nil { return input, errors.Wrap(err, "SInfrasResourceBase.ValidateUpdateData") } return input, nil } func (wire *SWire) ValidateDeleteCondition(ctx context.Context) error { cnt, err := wire.HostCount() if err != nil { return httperrors.NewInternalServerError("HostCount fail %s", err) } if cnt > 0 { return httperrors.NewNotEmptyError("wire contains hosts") } cnt, err = wire.NetworkCount() if err != nil { return httperrors.NewInternalServerError("NetworkCount fail %s", err) } if cnt > 0 { return httperrors.NewNotEmptyError("wire contains networks") } return wire.SInfrasResourceBase.ValidateDeleteCondition(ctx) } func (manager *SWireManager) getWireExternalIdForClassicNetwork(provider string, vpcId string, zoneId string) string { if !utils.IsInStringArray(provider, api.REGIONAL_NETWORK_PROVIDERS) { return fmt.Sprintf("%s-%s", vpcId, zoneId) } return vpcId } func (manager *SWireManager) GetOrCreateWireForClassicNetwork(ctx context.Context, vpc *SVpc, zone *SZone) (*SWire, error) { cloudprovider := vpc.GetCloudprovider() if cloudprovider == nil { return nil, fmt.Errorf("failed to found cloudprovider for vpc %s(%s)", vpc.Id, vpc.Id) } externalId := manager.getWireExternalIdForClassicNetwork(cloudprovider.Provider, vpc.Id, zone.Id) name := fmt.Sprintf("emulate for vpc %s classic network", vpc.Id) zoneId := zone.Id if utils.IsInStringArray(cloudprovider.Provider, api.REGIONAL_NETWORK_PROVIDERS) { //reginal network zoneId = "" } else { name = fmt.Sprintf("emulate for zone %s vpc %s classic network", zone.Name, vpc.Id) } _wire, err := db.FetchByExternalIdAndManagerId(manager, externalId, func(q *sqlchemy.SQuery) *sqlchemy.SQuery { sq := VpcManager.Query().SubQuery() return q.Join(sq, sqlchemy.Equals(sq.Field("id"), q.Field("id"))).Filter(sqlchemy.Equals(sq.Field("manager_id"), vpc.ManagerId)) }) if err == nil { return _wire.(*SWire), nil } if errors.Cause(err) != sql.ErrNoRows { return nil, errors.Wrap(err, "db.FetchByExternalId") } wire := &SWire{} wire.VpcId = vpc.Id wire.ZoneId = zoneId wire.SetModelManager(manager, wire) wire.ExternalId = externalId wire.IsEmulated = true wire.Name = name 
err = manager.TableSpec().Insert(ctx, wire) if err != nil { return nil, errors.Wrap(err, "Insert wire for classic network") } return wire, nil } func (wire *SWire) getHostwireQuery() *sqlchemy.SQuery { return HostwireManager.Query().Equals("wire_id", wire.Id) } func (wire *SWire) HostCount() (int, error) { q := HostwireManager.Query().Equals("wire_id", wire.Id).GroupBy("host_id") return q.CountWithError() } func (wire *SWire) GetHostwires() ([]SHostwire, error) { q := wire.getHostwireQuery() hostwires := make([]SHostwire, 0) err := db.FetchModelObjects(HostwireManager, q, &hostwires) if err != nil { return nil, err } return hostwires, nil } func (wire *SWire) NetworkCount() (int, error) { q := NetworkManager.Query().Equals("wire_id", wire.Id) return q.CountWithError() } func (wire *SWire) GetVpcId() string { if len(wire.VpcId) == 0 { return "default" } else { return wire.VpcId } } func (manager *SWireManager) getWiresByVpcAndZone(vpc *SVpc, zone *SZone) ([]SWire, error) { wires := make([]SWire, 0) q := manager.Query() if vpc != nil { q = q.Equals("vpc_id", vpc.Id) } if zone != nil { q = q.Equals("zone_id", zone.Id) } err := db.FetchModelObjects(manager, q, &wires) if err != nil { return nil, err } return wires, nil } func (manager *SWireManager) SyncWires(ctx context.Context, userCred mcclient.TokenCredential, vpc *SVpc, wires []cloudprovider.ICloudWire, provider *SCloudprovider) ([]SWire, []cloudprovider.ICloudWire, compare.SyncResult) { lockman.LockRawObject(ctx, "wires", vpc.Id) defer lockman.ReleaseRawObject(ctx, "wires", vpc.Id) localWires := make([]SWire, 0) remoteWires := make([]cloudprovider.ICloudWire, 0) syncResult := compare.SyncResult{} dbWires, err := manager.getWiresByVpcAndZone(vpc, nil) if err != nil { syncResult.Error(err) return nil, nil, syncResult } for i := range dbWires { if taskman.TaskManager.IsInTask(&dbWires[i]) { syncResult.Error(fmt.Errorf("object in task")) return nil, nil, syncResult } } removed := make([]SWire, 0) commondb := make([]SWire, 0) commonext := make([]cloudprovider.ICloudWire, 0) added := make([]cloudprovider.ICloudWire, 0) err = compare.CompareSets(dbWires, wires, &removed, &commondb, &commonext, &added) if err != nil { syncResult.Error(err) return nil, nil, syncResult } for i := 0; i < len(removed); i += 1 { err = removed[i].syncRemoveCloudWire(ctx, userCred) if err != nil { // cannot delete syncResult.DeleteError(err) } else { syncResult.Delete() } } for i := 0; i < len(commondb); i += 1 { err = commondb[i].syncWithCloudWire(ctx, userCred, commonext[i], vpc, provider) if err != nil { syncResult.UpdateError(err) } else { syncMetadata(ctx, userCred, &commondb[i], commonext[i]) localWires = append(localWires, commondb[i]) remoteWires = append(remoteWires, commonext[i]) syncResult.Update() } } for i := 0; i < len(added); i += 1 { new, err := manager.newFromCloudWire(ctx, userCred, added[i], vpc, provider) if err != nil { syncResult.AddError(err) } else { syncMetadata(ctx, userCred, new, added[i]) localWires = append(localWires, *new) remoteWires = append(remoteWires, added[i]) syncResult.Add() } } return localWires, remoteWires, syncResult } func (self *SWire) syncRemoveCloudWire(ctx context.Context, userCred mcclient.TokenCredential) error { lockman.LockObject(ctx, self) defer lockman.ReleaseObject(ctx, self) vpc := self.GetVpc() cloudprovider := vpc.GetCloudprovider() if self.ExternalId == WireManager.getWireExternalIdForClassicNetwork(cloudprovider.Provider, self.VpcId, self.ZoneId) { return nil } err := self.ValidateDeleteCondition(ctx) if err 
!= nil { // cannot delete err = self.markNetworkUnknown(userCred) } else { err = self.Delete(ctx, userCred) } return err } func (self *SWire) syncWithCloudWire(ctx context.Context, userCred mcclient.TokenCredential, extWire cloudprovider.ICloudWire, vpc *SVpc, provider *SCloudprovider) error { diff, err := db.UpdateWithLock(ctx, self, func() error { // self.Name = extWire.GetName() self.Bandwidth = extWire.GetBandwidth() // 10G self.IsEmulated = extWire.IsEmulated() self.Status = extWire.GetStatus() vpc := self.GetVpc() if vpc != nil { region, err := vpc.GetRegion() if err != nil { return errors.Wrapf(err, "vpc.GetRegion") } if utils.IsInStringArray(region.Provider, api.REGIONAL_NETWORK_PROVIDERS) { self.ZoneId = "" } } if self.IsEmulated { self.DomainId = vpc.DomainId // self.IsPublic = vpc.IsPublic // self.PublicScope = vpc.PublicScope // self.PublicSrc = vpc.PublicSrc } return nil }) if err != nil { log.Errorf("syncWithCloudWire error %s", err) } if provider != nil && !self.IsEmulated { SyncCloudDomain(userCred, self, provider.GetOwnerId()) self.SyncShareState(ctx, userCred, provider.getAccountShareInfo()) } else if self.IsEmulated { self.SaveSharedInfo(apis.TOwnerSource(vpc.PublicSrc), ctx, userCred, vpc.GetSharedInfo()) } db.OpsLog.LogSyncUpdate(self, diff, userCred) return err } func (self *SWire) markNetworkUnknown(userCred mcclient.TokenCredential) error { nets, err := self.getNetworks(nil, rbacutils.ScopeNone) if err != nil { return err } for i := 0; i < len(nets); i += 1 { nets[i].SetStatus(userCred, api.NETWORK_STATUS_UNKNOWN, "wire sync to remove") } return nil } func (manager *SWireManager) newFromCloudWire(ctx context.Context, userCred mcclient.TokenCredential, extWire cloudprovider.ICloudWire, vpc *SVpc, provider *SCloudprovider) (*SWire, error) { wire := SWire{} wire.SetModelManager(manager, &wire) wire.ExternalId = extWire.GetGlobalId() wire.Bandwidth = extWire.GetBandwidth() wire.Status = extWire.GetStatus() wire.VpcId = vpc.Id region, err := vpc.GetRegion() if err != nil { return nil, errors.Wrapf(err, "GetRegion for vpc %s(%s)", vpc.Name, vpc.Id) } if !utils.IsInStringArray(region.Provider, api.REGIONAL_NETWORK_PROVIDERS) { izone := extWire.GetIZone() if gotypes.IsNil(izone) { return nil, fmt.Errorf("missing zone for wire %s(%s)", wire.Name, wire.ExternalId) } zone, err := vpc.getZoneByExternalId(izone.GetGlobalId()) if err != nil { return nil, errors.Wrapf(err, "newFromCloudWire.getZoneByExternalId") } wire.ZoneId = zone.Id } wire.IsEmulated = extWire.IsEmulated() wire.DomainId = vpc.DomainId wire.IsPublic = vpc.IsPublic wire.PublicScope = vpc.PublicScope wire.PublicSrc = vpc.PublicSrc err = func() error { lockman.LockRawObject(ctx, manager.Keyword(), "name") defer lockman.ReleaseRawObject(ctx, manager.Keyword(), "name") newName, err := db.GenerateName(ctx, manager, userCred, extWire.GetName()) if err != nil { return err } wire.Name = newName return manager.TableSpec().Insert(ctx, &wire) }() if err != nil { return nil, errors.Wrapf(err, "Insert") } if provider != nil && !wire.IsEmulated { SyncCloudDomain(userCred, &wire, provider.GetOwnerId()) wire.SyncShareState(ctx, userCred, provider.getAccountShareInfo()) } db.OpsLog.LogEvent(&wire, db.ACT_CREATE, wire.GetShortDesc(ctx), userCred) return &wire, nil } func filterByScopeOwnerId(q *sqlchemy.SQuery, scope rbacutils.TRbacScope, ownerId mcclient.IIdentityProvider, domainResource bool) *sqlchemy.SQuery { switch scope { case rbacutils.ScopeSystem: case rbacutils.ScopeDomain: q = q.Equals("domain_id", 
ownerId.GetProjectDomainId()) case rbacutils.ScopeProject: if domainResource { q = q.Equals("domain_id", ownerId.GetProjectId()) } else { q = q.Equals("tenant_id", ownerId.GetProjectId()) } } return q } func fixVmwareProvider(providers []string) (bool, []string) { findVmware := false findOnecloud := false newp := make([]string, 0) for _, p := range providers { if p == api.CLOUD_PROVIDER_VMWARE { findVmware = true } else { if p == api.CLOUD_PROVIDER_ONECLOUD { findOnecloud = true } newp = append(newp, p) } } if findVmware && !findOnecloud { newp = append(newp, api.CLOUD_PROVIDER_ONECLOUD) } return findVmware, newp } func (manager *SWireManager) totalCountQ( rangeObjs []db.IStandaloneModel, hostTypes []string, hostProviders, hostBrands []string, providers []string, brands []string, cloudEnv string, scope rbacutils.TRbacScope, ownerId mcclient.IIdentityProvider, ) *sqlchemy.SQuery { guestsQ := filterByScopeOwnerId(GuestManager.Query(), scope, ownerId, false) guests := guestsQ.SubQuery() // hosts no filter, for guest networks hostsQ := HostManager.Query() if len(hostTypes) > 0 { hostsQ = hostsQ.In("host_type", hostTypes) } if len(hostProviders) > 0 || len(hostBrands) > 0 || len(cloudEnv) > 0 { hostsQ = CloudProviderFilter(hostsQ, hostsQ.Field("manager_id"), providers, brands, cloudEnv) } if len(rangeObjs) > 0 { hostsQ = RangeObjectsFilter(hostsQ, rangeObjs, nil, hostsQ.Field("zone_id"), hostsQ.Field("manager_id"), hostsQ.Field("id"), nil) } hosts := hostsQ.SubQuery() // hosts filter by owner, for host networks hostsQ2 := HostManager.Query() hostsQ2 = filterByScopeOwnerId(hostsQ2, scope, ownerId, true) if len(hostTypes) > 0 { hostsQ2 = hostsQ2.In("host_type", hostTypes) } if len(hostProviders) > 0 || len(hostBrands) > 0 || len(cloudEnv) > 0 { hostsQ2 = CloudProviderFilter(hostsQ2, hostsQ2.Field("manager_id"), providers, brands, cloudEnv) } if len(rangeObjs) > 0 { hostsQ2 = RangeObjectsFilter(hostsQ2, rangeObjs, nil, hostsQ.Field("zone_id"), hostsQ.Field("manager_id"), hostsQ.Field("id"), nil) } hosts2 := hostsQ2.SubQuery() groups := filterByScopeOwnerId(GroupManager.Query(), scope, ownerId, false).SubQuery() lbsQ := filterByScopeOwnerId(LoadbalancerManager.Query(), scope, ownerId, false) if len(providers) > 0 || len(brands) > 0 || len(cloudEnv) > 0 { lbsQ = CloudProviderFilter(lbsQ, lbsQ.Field("manager_id"), providers, brands, cloudEnv) } if len(rangeObjs) > 0 { lbsQ = RangeObjectsFilter(lbsQ, rangeObjs, lbsQ.Field("cloudregion_id"), lbsQ.Field("zone_id"), lbsQ.Field("manager_id"), nil, nil) } lbs := lbsQ.SubQuery() dbsQ := filterByScopeOwnerId(DBInstanceManager.Query(), scope, ownerId, false) if len(providers) > 0 || len(brands) > 0 || len(cloudEnv) > 0 { dbsQ = CloudProviderFilter(dbsQ, dbsQ.Field("manager_id"), providers, brands, cloudEnv) } if len(rangeObjs) > 0 { dbsQ = RangeObjectsFilter(dbsQ, rangeObjs, dbsQ.Field("cloudregion_id"), dbsQ.Field("zone_id"), dbsQ.Field("manager_id"), nil, nil) } dbs := dbsQ.SubQuery() gNics := GuestnetworkManager.Query().SubQuery() gNicQ := gNics.Query( gNics.Field("network_id"), sqlchemy.COUNT("gnic_count"), sqlchemy.SUM("pending_deleted_gnic_count", guests.Field("pending_deleted")), ) gNicQ = gNicQ.Join(guests, sqlchemy.Equals(guests.Field("id"), gNics.Field("guest_id"))) gNicQ = gNicQ.Join(hosts, sqlchemy.Equals(guests.Field("host_id"), hosts.Field("id"))) gNicQ = gNicQ.Filter(sqlchemy.IsTrue(hosts.Field("enabled"))) hNics := HostnetworkManager.Query().SubQuery() hNicQ := hNics.Query( hNics.Field("network_id"), sqlchemy.COUNT("hnic_count"), ) hNicQ = 
hNicQ.Join(hosts2, sqlchemy.Equals(hNics.Field("baremetal_id"), hosts2.Field("id"))) hNicQ = hNicQ.Filter(sqlchemy.IsTrue(hosts2.Field("enabled"))) groupNics := GroupnetworkManager.Query().SubQuery() grpNicQ := groupNics.Query( groupNics.Field("network_id"), sqlchemy.COUNT("grpnic_count"), ) grpNicQ = grpNicQ.Join(groups, sqlchemy.Equals(groups.Field("id"), groupNics.Field("group_id"))) lbNics := LoadbalancernetworkManager.Query().SubQuery() lbNicQ := lbNics.Query( lbNics.Field("network_id"), sqlchemy.COUNT("lbnic_count"), ) lbNicQ = lbNicQ.Join(lbs, sqlchemy.Equals(lbs.Field("id"), lbNics.Field("loadbalancer_id"))) lbNicQ = lbNicQ.Filter(sqlchemy.IsFalse(lbs.Field("pending_deleted"))) eipNicsQ := ElasticipManager.Query().IsNotEmpty("network_id") eipNics := filterByScopeOwnerId(eipNicsQ, scope, ownerId, false).SubQuery() eipNicQ := eipNics.Query( eipNics.Field("network_id"), sqlchemy.COUNT("eipnic_count"), ) if len(providers) > 0 || len(brands) > 0 || len(cloudEnv) > 0 { eipNicQ = CloudProviderFilter(eipNicQ, eipNicQ.Field("manager_id"), providers, brands, cloudEnv) } if len(rangeObjs) > 0 { eipNicQ = RangeObjectsFilter(eipNicQ, rangeObjs, eipNicQ.Field("cloudregion_id"), nil, eipNicQ.Field("manager_id"), nil, nil) } netifsQ := NetworkInterfaceManager.Query() netifsQ = filterByScopeOwnerId(netifsQ, scope, ownerId, true) if len(providers) > 0 || len(brands) > 0 || len(cloudEnv) > 0 { netifsQ = CloudProviderFilter(netifsQ, netifsQ.Field("manager_id"), providers, brands, cloudEnv) } if len(rangeObjs) > 0 { netifsQ = RangeObjectsFilter(netifsQ, rangeObjs, netifsQ.Field("cloudregion_id"), nil, netifsQ.Field("manager_id"), nil, nil) } netifs := netifsQ.SubQuery() netifNics := NetworkinterfacenetworkManager.Query().SubQuery() netifNicQ := netifNics.Query( netifNics.Field("network_id"), sqlchemy.COUNT("netifnic_count"), ) netifNicQ = netifNicQ.Join(netifs, sqlchemy.Equals(netifNics.Field("networkinterface_id"), netifs.Field("id"))) dbNics := DBInstanceNetworkManager.Query().SubQuery() dbNicQ := dbNics.Query( dbNics.Field("network_id"), sqlchemy.COUNT("dbnic_count"), ) dbNicQ = dbNicQ.Join(dbs, sqlchemy.Equals(dbs.Field("id"), dbNics.Field("dbinstance_id"))) dbNicQ = dbNicQ.Filter(sqlchemy.IsFalse(dbs.Field("pending_deleted"))) gNicSQ := gNicQ.GroupBy(gNics.Field("network_id")).SubQuery() hNicSQ := hNicQ.GroupBy(hNics.Field("network_id")).SubQuery() grpNicSQ := grpNicQ.GroupBy(groupNics.Field("network_id")).SubQuery() lbNicSQ := lbNicQ.GroupBy(lbNics.Field("network_id")).SubQuery() eipNicSQ := eipNicQ.GroupBy(eipNics.Field("network_id")).SubQuery() netifNicSQ := netifNicQ.GroupBy(netifNics.Field("network_id")).SubQuery() dbNicSQ := dbNicQ.GroupBy(dbNics.Field("network_id")).SubQuery() networks := NetworkManager.Query().SubQuery() netQ := networks.Query( sqlchemy.SUM("guest_nic_count", gNicSQ.Field("gnic_count")), sqlchemy.SUM("pending_deleted_guest_nic_count", gNicSQ.Field("pending_deleted_gnic_count")), sqlchemy.SUM("host_nic_count", hNicSQ.Field("hnic_count")), sqlchemy.SUM("group_nic_count", grpNicSQ.Field("grpnic_count")), sqlchemy.SUM("lb_nic_count", lbNicSQ.Field("lbnic_count")), sqlchemy.SUM("eip_nic_count", eipNicSQ.Field("eipnic_count")), sqlchemy.SUM("netif_nic_count", netifNicSQ.Field("netifnic_count")), sqlchemy.SUM("db_nic_count", dbNicSQ.Field("dbnic_count")), ) netQ = netQ.LeftJoin(gNicSQ, sqlchemy.Equals(gNicSQ.Field("network_id"), networks.Field("id"))) netQ = netQ.LeftJoin(hNicSQ, sqlchemy.Equals(hNicSQ.Field("network_id"), networks.Field("id"))) netQ = netQ.LeftJoin(grpNicSQ, 
sqlchemy.Equals(grpNicSQ.Field("network_id"), networks.Field("id"))) netQ = netQ.LeftJoin(lbNicSQ, sqlchemy.Equals(lbNicSQ.Field("network_id"), networks.Field("id"))) netQ = netQ.LeftJoin(eipNicSQ, sqlchemy.Equals(eipNicSQ.Field("network_id"), networks.Field("id"))) netQ = netQ.LeftJoin(netifNicSQ, sqlchemy.Equals(netifNicSQ.Field("network_id"), networks.Field("id"))) netQ = netQ.LeftJoin(dbNicSQ, sqlchemy.Equals(dbNicSQ.Field("network_id"), networks.Field("id"))) return netQ } func (manager *SWireManager) totalCountQ2( rangeObjs []db.IStandaloneModel, hostTypes []string, providers []string, brands []string, cloudEnv string, scope rbacutils.TRbacScope, ownerId mcclient.IIdentityProvider, ) *sqlchemy.SQuery { revIps := filterExpiredReservedIps(ReservedipManager.Query()).SubQuery() revQ := revIps.Query( revIps.Field("network_id"), sqlchemy.COUNT("rnic_count"), ) revSQ := revQ.GroupBy(revIps.Field("network_id")).SubQuery() ownerNetworks := filterByScopeOwnerId(NetworkManager.Query(), scope, ownerId, false).SubQuery() ownerNetQ := ownerNetworks.Query( ownerNetworks.Field("wire_id"), sqlchemy.COUNT("id").Label("net_count"), sqlchemy.SUM("rev_count", revSQ.Field("rnic_count")), ) ownerNetQ = ownerNetQ.LeftJoin(revSQ, sqlchemy.Equals(revSQ.Field("network_id"), ownerNetworks.Field("id"))) ownerNetQ = ownerNetQ.GroupBy(ownerNetworks.Field("wire_id")) ownerNetSQ := ownerNetQ.SubQuery() wires := WireManager.Query().SubQuery() q := wires.Query( sqlchemy.SUM("net_count", ownerNetSQ.Field("net_count")), sqlchemy.SUM("reserved_count", ownerNetSQ.Field("rev_count")), ) q = q.LeftJoin(ownerNetSQ, sqlchemy.Equals(wires.Field("id"), ownerNetSQ.Field("wire_id"))) return filterWiresCountQuery(q, hostTypes, providers, brands, cloudEnv, rangeObjs) } func (manager *SWireManager) totalCountQ3( rangeObjs []db.IStandaloneModel, hostTypes []string, providers []string, brands []string, cloudEnv string, scope rbacutils.TRbacScope, ownerId mcclient.IIdentityProvider, ) *sqlchemy.SQuery { wires := filterByScopeOwnerId(WireManager.Query(), scope, ownerId, true).SubQuery() q := wires.Query( sqlchemy.COUNT("id").Label("wires_count"), sqlchemy.SUM("emulated_wires_count", wires.Field("is_emulated")), ) return filterWiresCountQuery(q, hostTypes, providers, brands, cloudEnv, rangeObjs) } func filterWiresCountQuery(q *sqlchemy.SQuery, hostTypes, providers, brands []string, cloudEnv string, rangeObjs []db.IStandaloneModel) *sqlchemy.SQuery { if len(hostTypes) > 0 { hostwires := HostwireManager.Query().SubQuery() hosts := HostManager.Query().SubQuery() hostWireQ := hostwires.Query(hostwires.Field("wire_id")) hostWireQ = hostWireQ.Join(hosts, sqlchemy.Equals(hostWireQ.Field("host_id"), hosts.Field("id"))) hostWireQ = hostWireQ.Filter(sqlchemy.In(hosts.Field("host_type"), hostTypes)) hostWireQ = hostWireQ.GroupBy(hostwires.Field("wire_id")) hostWireSQ := hostWireQ.SubQuery() q = q.Join(hostWireSQ, sqlchemy.Equals(hostWireSQ.Field("wire_id"), q.Field("id"))) } if len(rangeObjs) > 0 || len(providers) > 0 || len(brands) > 0 || len(cloudEnv) > 0 { vpcs := VpcManager.Query().SubQuery() q = q.Join(vpcs, sqlchemy.Equals(q.Field("vpc_id"), vpcs.Field("id"))) q = CloudProviderFilter(q, vpcs.Field("manager_id"), providers, brands, cloudEnv) q = RangeObjectsFilter(q, rangeObjs, vpcs.Field("cloudregion_id"), q.Field("zone_id"), vpcs.Field("manager_id"), nil, nil) } return q } type WiresCountStat struct { WiresCount int EmulatedWiresCount int NetCount int GuestNicCount int HostNicCount int ReservedCount int GroupNicCount int LbNicCount int 
EipNicCount int NetifNicCount int DbNicCount int PendingDeletedGuestNicCount int } func (wstat WiresCountStat) NicCount() int { return wstat.GuestNicCount + wstat.HostNicCount + wstat.ReservedCount + wstat.GroupNicCount + wstat.LbNicCount + wstat.NetifNicCount + wstat.EipNicCount + wstat.DbNicCount } func (manager *SWireManager) TotalCount( rangeObjs []db.IStandaloneModel, hostTypes []string, providers []string, brands []string, cloudEnv string, scope rbacutils.TRbacScope, ownerId mcclient.IIdentityProvider, ) WiresCountStat { vmwareP, hostProviders := fixVmwareProvider(providers) vmwareB, hostBrands := fixVmwareProvider(brands) if vmwareP || vmwareB { if !utils.IsInStringArray(api.HOST_TYPE_ESXI, hostTypes) { hostTypes = append(hostTypes, api.HOST_TYPE_ESXI) } } else { if utils.IsInStringArray(api.HOST_TYPE_ESXI, hostTypes) { providers = append(providers, api.CLOUD_PROVIDER_VMWARE) brands = append(brands, api.CLOUD_PROVIDER_VMWARE) } } if len(hostTypes) > 0 { for _, p := range providers { if hs, ok := api.CLOUD_PROVIDER_HOST_TYPE_MAP[p]; ok { hostTypes = append(hostTypes, hs...) } } for _, p := range brands { if hs, ok := api.CLOUD_PROVIDER_HOST_TYPE_MAP[p]; ok { hostTypes = append(hostTypes, hs...) } } } log.Debugf("providers: %#v hostProviders: %#v brands: %#v hostBrands: %#v hostTypes: %#v", providers, hostProviders, brands, hostBrands, hostTypes) stat := WiresCountStat{} err := manager.totalCountQ( rangeObjs, hostTypes, hostProviders, hostBrands, providers, brands, cloudEnv, scope, ownerId, ).First(&stat) if err != nil { log.Errorf("Wire total count: %v", err) } err = manager.totalCountQ2( rangeObjs, hostTypes, providers, brands, cloudEnv, scope, ownerId, ).First(&stat) if err != nil { log.Errorf("Wire total count 2: %v", err) } err = manager.totalCountQ3( rangeObjs, hostTypes, providers, brands, cloudEnv, scope, ownerId, ).First(&stat) if err != nil { log.Errorf("Wire total count 2: %v", err) } return stat } func (self *SWire) getNetworkQuery(ownerId mcclient.IIdentityProvider, scope rbacutils.TRbacScope) *sqlchemy.SQuery { q := NetworkManager.Query().Equals("wire_id", self.Id) if ownerId != nil { q = NetworkManager.FilterByOwner(q, ownerId, scope) } return q } func (self *SWire) GetNetworks(ownerId mcclient.IIdentityProvider, scope rbacutils.TRbacScope) ([]SNetwork, error) { return self.getNetworks(ownerId, scope) } func (self *SWire) getNetworks(ownerId mcclient.IIdentityProvider, scope rbacutils.TRbacScope) ([]SNetwork, error) { q := self.getNetworkQuery(ownerId, scope) nets := make([]SNetwork, 0) err := db.FetchModelObjects(NetworkManager, q, &nets) if err != nil { return nil, err } return nets, nil } func (self *SWire) getGatewayNetworkQuery(ownerId mcclient.IIdentityProvider, scope rbacutils.TRbacScope) *sqlchemy.SQuery { q := self.getNetworkQuery(ownerId, scope) q = q.IsNotNull("guest_gateway").IsNotEmpty("guest_gateway") q = q.Equals("status", api.NETWORK_STATUS_AVAILABLE) return q } func (self *SWire) getAutoAllocNetworks(ownerId mcclient.IIdentityProvider, scope rbacutils.TRbacScope) ([]SNetwork, error) { q := self.getGatewayNetworkQuery(ownerId, scope) q = q.IsTrue("is_auto_alloc") nets := make([]SNetwork, 0) err := db.FetchModelObjects(NetworkManager, q, &nets) if err != nil { return nil, err } return nets, nil } func (self *SWire) getPublicNetworks(ownerId mcclient.IIdentityProvider, scope rbacutils.TRbacScope) ([]SNetwork, error) { q := self.getGatewayNetworkQuery(ownerId, scope) q = q.IsTrue("is_public") nets := make([]SNetwork, 0) err := 
db.FetchModelObjects(NetworkManager, q, &nets) if err != nil { return nil, err } return nets, nil } func (self *SWire) getPrivateNetworks(ownerId mcclient.IIdentityProvider, scope rbacutils.TRbacScope) ([]SNetwork, error) { q := self.getGatewayNetworkQuery(ownerId, scope) q = q.IsFalse("is_public") nets := make([]SNetwork, 0) err := db.FetchModelObjects(NetworkManager, q, &nets) if err != nil { return nil, err } return nets, nil } func (self *SWire) GetCandidatePrivateNetwork(ownerId mcclient.IIdentityProvider, scope rbacutils.TRbacScope, isExit bool, serverTypes []string) (*SNetwork, error) { nets, err := self.getPrivateNetworks(ownerId, scope) if err != nil { return nil, err } return ChooseCandidateNetworks(nets, isExit, serverTypes), nil } func (self *SWire) GetCandidateAutoAllocNetwork(ownerId mcclient.IIdentityProvider, scope rbacutils.TRbacScope, isExit bool, serverTypes []string) (*SNetwork, error) { nets, err := self.getAutoAllocNetworks(ownerId, scope) if err != nil { return nil, err } return ChooseCandidateNetworks(nets, isExit, serverTypes), nil } func (self *SWire) GetCandidateNetworkForIp(ownerId mcclient.IIdentityProvider, scope rbacutils.TRbacScope, ipAddr string) (*SNetwork, error) { ip, err := netutils.NewIPV4Addr(ipAddr) if err != nil { return nil, err } netPrivates, err := self.getPrivateNetworks(ownerId, scope) if err != nil { return nil, err } for _, net := range netPrivates { if net.IsAddressInRange(ip) { return &net, nil } } netPublics, err := self.getPublicNetworks(ownerId, scope) if err != nil { return nil, err } for _, net := range netPublics { if net.IsAddressInRange(ip) { return &net, nil } } return nil, nil } func ChooseNetworkByAddressCount(nets []*SNetwork) (*SNetwork, *SNetwork) { return chooseNetworkByAddressCount(nets) } func chooseNetworkByAddressCount(nets []*SNetwork) (*SNetwork, *SNetwork) { minCnt := 65535 maxCnt := 0 var minSel *SNetwork var maxSel *SNetwork for _, net := range nets { cnt, err := net.getFreeAddressCount() if err != nil || cnt <= 0 { continue } if minSel == nil || minCnt > cnt { minSel = net minCnt = cnt } if maxSel == nil || maxCnt < cnt { maxSel = net maxCnt = cnt } } return minSel, maxSel } func ChooseCandidateNet
isExit bool, serverTypes []string) *SNetwork { matchingNets := make([]*SNetwork, 0) notMatchingNets := make([]*SNetwork, 0) for _, s := range serverTypes { net := chooseCandidateNetworksByNetworkType(nets, isExit, s) if net != nil { if utils.IsInStringArray(net.ServerType, serverTypes) { matchingNets = append(matchingNets, net) } else { notMatchingNets = append(notMatchingNets, net) } } } if len(matchingNets) >= 1 { return matchingNets[0] } if len(notMatchingNets) >= 1 { return notMatchingNets[0] } return nil } func chooseCandidateNetworksByNetworkType(nets []SNetwork, isExit bool, serverType string) *SNetwork { matchingNets := make([]*SNetwork, 0) notMatchingNets := make([]*SNetwork, 0) for i := 0; i < len(nets); i++ { net := nets[i] if isExit != net.IsExitNetwork() { continue } if serverType == net.ServerType || (len(net.ServerType) == 0 && serverType == api.NETWORK_TYPE_GUEST) { matchingNets = append(matchingNets, &net) } else { notMatchingNets = append(notMatchingNets, &net) } } minSel, maxSel := chooseNetworkByAddressCount(matchingNets) if (isExit && minSel == nil) || (!isExit && maxSel == nil) { minSel, maxSel = chooseNetworkByAddressCount(notMatchingNets) } if isExit { return minSel } else { return maxSel } } func (manager *SWireManager) InitializeData() error { wires := make([]SWire, 0) q := manager.Query() q.Filter(sqlchemy.OR(sqlchemy.IsEmpty(q.Field("vpc_id")), sqlchemy.IsEmpty(q.Field("status")), sqlchemy.Equals(q.Field("status"), "init"), sqlchemy.Equals(q.Field("status"), api.WIRE_STATUS_READY_DEPRECATED))) err := db.FetchModelObjects(manager, q, &wires) if err != nil { return err } for _, w := range wires { db.Update(&w, func() error { if len(w.VpcId) == 0 { w.VpcId = api.DEFAULT_VPC_ID } if len(w.Status) == 0 || w.Status == "init" || w.Status == api.WIRE_STATUS_READY_DEPRECATED { w.Status = api.WIRE_STATUS_AVAILABLE } return nil }) } return nil } func (wire *SWire) isOneCloudVpcWire() bool { return IsOneCloudVpcResource(wire) } func (wire *SWire) getEnabledHosts() []SHost { hosts := make([]SHost, 0) hostQuery := HostManager.Query().SubQuery() hostwireQuery := HostwireManager.Query().SubQuery() q := hostQuery.Query() q = q.Join(hostwireQuery, sqlchemy.AND(sqlchemy.Equals(hostQuery.Field("id"), hostwireQuery.Field("host_id")), sqlchemy.IsFalse(hostwireQuery.Field("deleted")))) q = q.Filter(sqlchemy.IsTrue(hostQuery.Field("enabled"))) q = q.Filter(sqlchemy.Equals(hostQuery.Field("host_status"), api.HOST_ONLINE)) if wire.isOneCloudVpcWire() { q = q.Filter(sqlchemy.NOT(sqlchemy.IsNullOrEmpty(hostQuery.Field("ovn_version")))) } else { q = q.Filter(sqlchemy.Equals(hostwireQuery.Field("wire_id"), wire.Id)) } err := db.FetchModelObjects(HostManager, q, &hosts) if err != nil { log.Errorf("getEnabledHosts fail %s", err) return nil } return hosts } func (wire *SWire) clearHostSchedDescCache() error { hosts := wire.getEnabledHosts() if hosts != nil { for i := 0; i < len(hosts); i += 1 { host := hosts[i] if err := host.ClearSchedDescCache(); err != nil { return errors.Wrapf(err, "wire %s clear host %s sched cache", wire.GetName(), host.GetName()) } } } return nil } func (self *SWire) GetIWire() (cloudprovider.ICloudWire, error) { vpc := self.GetVpc() if vpc == nil { log.Errorf("Cannot find VPC for wire???") return nil, fmt.Errorf("No VPC?????") } ivpc, err := vpc.GetIVpc() if err != nil { return nil, err } return ivpc.GetIWireById(self.GetExternalId()) } func (manager *SWireManager) FetchWireById(wireId string) *SWire { wireObj, err := manager.FetchById(wireId) if err != nil { 
log.Errorf("FetchWireById fail %s", err) return nil } return wireObj.(*SWire) } func (manager *SWireManager) GetOnPremiseWireOfIp(ipAddr string) (*SWire, error) { net, err := NetworkManager.GetOnPremiseNetworkOfIP(ipAddr, "", tristate.None) if err != nil { return nil, err } wire := net.GetWire() if wire != nil { return wire, nil } else { return nil, fmt.Errorf("Wire not found") } } func (w *SWire) AllowPerformMergeNetwork(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) bool { return w.IsOwner(userCred) || db.IsAdminAllowPerform(userCred, w, "merge-network") } func (w *SWire) PerformMergeNetwork(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.WireMergeNetworkInput) (jsonutils.JSONObject, error) { return nil, w.StartMergeNetwork(ctx, userCred, "") } func (sm *SWireManager) FetchByIdsOrNames(idOrNames []string) ([]SWire, error) { if len(idOrNames) == 0 { return nil, nil } q := sm.Query() if len(idOrNames) == 1 { q.Filter(sqlchemy.OR(sqlchemy.Equals(q.Field("id"), idOrNames[0]), sqlchemy.Equals(q.Field("name"), idOrNames[0]))) } else { q.Filter(sqlchemy.OR(sqlchemy.In(q.Field("id"), idOrNames), sqlchemy.In(q.Field("name"), idOrNames))) } ret := make([]SWire, 0, len(idOrNames)) err := db.FetchModelObjects(sm, q, &ret) if err != nil { return nil, err } return ret, nil } func (w *SWire) AllowPerformMergeFrom(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) bool { return w.IsOwner(userCred) || db.IsAdminAllowPerform(userCred, w, "merge-from") } func (w *SWire) PerformMergeFrom(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.WireMergeFromInput) (ret jsonutils.JSONObject, err error) { if len(input.Sources) == 0 { return nil, httperrors.NewMissingParameterError("sources") } defer func() { if err != nil { logclient.AddActionLogWithContext(ctx, w, logclient.ACT_MERGE, err.Error(), userCred, false) } }() wires, err := WireManager.FetchByIdsOrNames(input.Sources) if err != nil { return } wireIdOrNameSet := sets.NewString(input.Sources...) 
for i := range wires { id, name := wires[i].GetId(), wires[i].GetName() if wireIdOrNameSet.Has(id) { wireIdOrNameSet.Delete(id) continue } if wireIdOrNameSet.Has(name) { wireIdOrNameSet.Delete(name) } } if wireIdOrNameSet.Len() > 0 { return nil, httperrors.NewInputParameterError("invalid wire id or name %v", wireIdOrNameSet.UnsortedList()) } lockman.LockClass(ctx, WireManager, db.GetLockClassKey(WireManager, userCred)) defer lockman.ReleaseClass(ctx, WireManager, db.GetLockClassKey(WireManager, userCred)) for _, tw := range wires { err = WireManager.handleWireIdChange(ctx, &wireIdChangeArgs{ oldWire: &tw, newWire: w, }) if err != nil { return nil, errors.Wrapf(err, "unable to merge wire %s to %s", tw.GetId(), w.GetId()) } if err = tw.Delete(ctx, userCred); err != nil { return nil, err } } logclient.AddActionLogWithContext(ctx, w, logclient.ACT_MERGE_FROM, "", userCred, true) if input.MergeNetwork { err = w.StartMergeNetwork(ctx, userCred, "") if err != nil { return nil, errors.Wrap(err, "unable to StartMergeNetwork") } } return } func (w *SWire) AllowPerformMergeTo(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) bool { return w.IsOwner(userCred) || db.IsAdminAllowPerform(userCred, w, "merge-to") } func (w *SWire) PerformMergeTo(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.WireMergeInput) (ret jsonutils.JSONObject, err error) { if len(input.Target) == 0 { return nil, httperrors.NewMissingParameterError("target") } defer func() { if err != nil { logclient.AddActionLogWithContext(ctx, w, logclient.ACT_MERGE, err.Error(), userCred, false) } }() iw, err := WireManager.FetchByIdOrName(userCred, input.Target) if err == sql.ErrNoRows { err = httperrors.NewNotFoundError("Wire %q", input.Target) return } if err != nil { return } tw := iw.(*SWire) lockman.LockClass(ctx, WireManager, db.GetLockClassKey(WireManager, userCred)) defer lockman.ReleaseClass(ctx, WireManager, db.GetLockClassKey(WireManager, userCred)) err = WireManager.handleWireIdChange(ctx, &wireIdChangeArgs{ oldWire: w, newWire: tw, }) if err != nil { return } logclient.AddActionLogWithContext(ctx, w, logclient.ACT_MERGE, "", userCred, true) if err = w.Delete(ctx, userCred); err != nil { return nil, err } if input.MergeNetwork { err = tw.StartMergeNetwork(ctx, userCred, "") if err != nil { return nil, errors.Wrap(err, "unable to StartMergeNetwork") } } return } func (w *SWire) StartMergeNetwork(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error { task, err := taskman.TaskManager.NewTask(ctx, "NetworksUnderWireMergeTask", w, userCred, nil, parentTaskId, "", nil) if err != nil { return err } task.ScheduleRun(nil) return nil } func (wm *SWireManager) handleWireIdChange(ctx context.Context, args *wireIdChangeArgs) error { handlers := []wireIdChangeHandler{ HostwireManager, NetworkManager, LoadbalancerClusterManager, } errs := []error{} for _, h := range handlers { if err := h.handleWireIdChange(ctx, args); err != nil { errs = append(errs, err) } } if len(errs) > 0 { err := errors.NewAggregate(errs) return httperrors.NewGeneralError(err) } return nil } // Layer-2 (wire) network list func (manager *SWireManager) ListItemFilter( ctx context.Context, q *sqlchemy.SQuery, userCred mcclient.TokenCredential, query api.WireListInput, ) (*sqlchemy.SQuery, error) { var err error q, err = manager.SVpcResourceBaseManager.ListItemFilter(ctx, q, userCred, query.VpcFilterListInput) if err != nil { return nil, errors.Wrap(err, 
"SVpcResourceBaseManager.ListItemFilter") } q, err = manager.SExternalizedResourceBaseManager.ListItemFilter(ctx, q, userCred, query.ExternalizedResourceBaseListInput) if err != nil { return nil, errors.Wrap(err, "SExternalizedResourceBaseManager.ListItemFilter") } zoneQuery := api.ZonalFilterListInput{ ZonalFilterListBase: query.ZonalFilterListBase, } q, err = manager.SZoneResourceBaseManager.ListItemFilter(ctx, q, userCred, zoneQuery) if err != nil { return nil, errors.Wrap(err, "SZoneResourceBaseManager.ListItemFilter") } q, err = manager.SInfrasResourceBaseManager.ListItemFilter(ctx, q, userCred, query.InfrasResourceBaseListInput) if err != nil { return nil, errors.Wrap(err, "SInfrasResourceBaseManager.ListItemFilter") } hostStr := query.HostId if len(hostStr) > 0 { hostObj, err := HostManager.FetchByIdOrName(userCred, hostStr) if err != nil { return nil, httperrors.NewResourceNotFoundError2(HostManager.Keyword(), hostStr) } sq := HostwireManager.Query("wire_id").Equals("host_id", hostObj.GetId()) q = q.Filter(sqlchemy.In(q.Field("id"), sq.SubQuery())) } if query.Bandwidth != nil { q = q.Equals("bandwidth", *query.Bandwidth) } return q, nil } func (manager *SWireManager) OrderByExtraFields( ctx context.Context, q *sqlchemy.SQuery, userCred mcclient.TokenCredential, query api.WireListInput, ) (*sqlchemy.SQuery, error) { var err error q, err = manager.SInfrasResourceBaseManager.OrderByExtraFields(ctx, q, userCred, query.InfrasResourceBaseListInput) if err != nil { return nil, errors.Wrap(err, "SInfrasResourceBaseManager.OrderByExtraFields") } q, err = manager.SVpcResourceBaseManager.OrderByExtraFields(ctx, q, userCred, query.VpcFilterListInput) if err != nil { return nil, errors.Wrap(err, "SVpcResourceBaseManager.OrderByExtraFields") } zoneQuery := api.ZonalFilterListInput{ ZonalFilterListBase: query.ZonalFilterListBase, } q, err = manager.SZoneResourceBaseManager.OrderByExtraFields(ctx, q, userCred, zoneQuery) if err != nil { return nil, errors.Wrap(err, "SZoneResourceBaseManager.OrderByExtraFields") } return q, nil } func (manager *SWireManager) QueryDistinctExtraField(q *sqlchemy.SQuery, field string) (*sqlchemy.SQuery, error) { var err error q, err = manager.SInfrasResourceBaseManager.QueryDistinctExtraField(q, field) if err == nil { return q, nil } q, err = manager.SVpcResourceBaseManager.QueryDistinctExtraField(q, field) if err == nil { return q, nil } q, err = manager.SZoneResourceBaseManager.QueryDistinctExtraField(q, field) if err == nil { return q, nil } return q, httperrors.ErrNotFound } /*func (self *SWire) getRegion() *SCloudregion { zone := self.GetZone() if zone != nil { return zone.GetRegion() } vpc := self.getVpc() if vpc != nil { region, _ := vpc.GetRegion() return region } return nil }*/ func (self *SWire) GetExtraDetails(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, isList bool) (api.WireDetails, error) { return api.WireDetails{}, nil } func (manager *SWireManager) FetchCustomizeColumns( ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, objs []interface{}, fields stringutils2.SSortedStrings, isList bool, ) []api.WireDetails { rows := make([]api.WireDetails, len(objs)) stdRows := manager.SInfrasResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList) vpcRows := manager.SVpcResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList) zoneRows := manager.SZoneResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList) for 
i := range rows { rows[i] = api.WireDetails{ InfrasResourceBaseDetails: stdRows[i], VpcResourceInfo: vpcRows[i], ZoneResourceInfoBase: zoneRows[i].ZoneResourceInfoBase, } wire := objs[i].(*SWire) rows[i].Networks, _ = wire.NetworkCount() rows[i].HostCount, _ = wire.HostCount() } return rows } func (man *SWireManager) removeWiresByVpc(ctx context.Context, userCred mcclient.TokenCredential, vpc *SVpc) error { wires := []SWire{} q := man.Query().Equals("vpc_id", vpc.Id) err := db.FetchModelObjects(man, q, &wires) if err != nil { return err } var errs []error for i := range wires { wire := &wires[i] if err := wire.Delete(ctx, userCred); err != nil { errs = append(errs, err) } } return errors.NewAggregate(errs) } func (self *SWire) IsManaged() bool { vpc := self.GetVpc() if vpc == nil { return false } return vpc.IsManaged() } func (model *SWire) CustomizeCreate(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, data jsonutils.JSONObject) error { if !data.Contains("public_scope") { vpc := model.GetVpc() if !model.IsManaged() && db.IsAdminAllowPerform(userCred, model, "public") && ownerId.GetProjectDomainId() == userCred.GetProjectDomainId() && vpc != nil && vpc.IsPublic && vpc.PublicScope == string(rbacutils.ScopeSystem) { model.SetShare(rbacutils.ScopeSystem) } else { model.SetShare(rbacutils.ScopeNone) } data.(*jsonutils.JSONDict).Set("public_scope", jsonutils.NewString(model.PublicScope)) } model.Status = api.WIRE_STATUS_AVAILABLE return model.SInfrasResourceBase.CustomizeCreate(ctx, userCred, ownerId, query, data) } func (wire *SWire) GetChangeOwnerCandidateDomainIds() []string { candidates := [][]string{} vpc := wire.GetVpc() if vpc != nil { candidates = append(candidates, vpc.GetChangeOwnerCandidateDomainIds(), db.ISharableChangeOwnerCandidateDomainIds(vpc)) } return db.ISharableMergeChangeOwnerCandidateDomainIds(wire, candidates...) } func (wire *SWire) GetChangeOwnerRequiredDomainIds() []string { requires := stringutils2.SSortedStrings{} networks, _ := wire.getNetworks(nil, rbacutils.ScopeNone) for i := range networks { requires = stringutils2.Append(requires, networks[i].DomainId) } return requires } func (wire *SWire) GetRequiredSharedDomainIds() []string { networks, _ := wire.getNetworks(nil, rbacutils.ScopeNone) if len(networks) == 0 { return wire.SInfrasResourceBase.GetRequiredSharedDomainIds() } requires := make([][]string, len(networks)) for i := range networks { requires[i] = db.ISharableChangeOwnerCandidateDomainIds(&networks[i]) } return db.ISharableMergeShareRequireDomainIds(requires...) } func (manager *SWireManager) ListItemExportKeys(ctx context.Context, q *sqlchemy.SQuery, userCred mcclient.TokenCredential, keys stringutils2.SSortedStrings, ) (*sqlchemy.SQuery, error) { var err error q, err = manager.SInfrasResourceBaseManager.ListItemExportKeys(ctx, q, userCred, keys) if err != nil { return nil, errors.Wrap(err, "SInfrasResourceBaseManager.ListItemExportKeys") } if keys.ContainsAny(manager.SZoneResourceBaseManager.GetExportKeys()...) { q, err = manager.SZoneResourceBaseManager.ListItemExportKeys(ctx, q, userCred, keys) if err != nil { return nil, errors.Wrap(err, "SZoneResourceBaseManager.ListItemExportKeys") } } if keys.ContainsAny(manager.SVpcResourceBaseManager.GetExportKeys()...) { q, err = manager.SVpcResourceBaseManager.ListItemExportKeys(ctx, q, userCred, keys) if err != nil { return nil, errors.Wrap(err, "SVpcResourceBaseManager.ListItemExportKeys") } } return q, nil }
works(nets []SNetwork,
version.py
def _safe_int(string):
__version__ = '3.0.6' VERSION = tuple(_safe_int(x) for x in __version__.split('.'))
try: return int(string) except ValueError: return string
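For orientation, here is the version.py snippet assembled in reading order (prefix, middle, then suffix) together with a short illustration of the resulting value; the final assert is added only for illustration and is not part of the original file:

# Sketch: assembled version of the snippet above, showing how VERSION is derived.
def _safe_int(string):
    try:
        return int(string)
    except ValueError:
        return string

__version__ = '3.0.6'
VERSION = tuple(_safe_int(x) for x in __version__.split('.'))

assert VERSION == (3, 0, 6)
# A non-numeric component would be kept as a string, e.g. '3.0.6b1' -> (3, 0, '6b1').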
test_geo_location.py
"""The tests for the geojson platform.""" from homeassistant.components import geo_location from homeassistant.components.geo_json_events.geo_location import ( ATTR_EXTERNAL_ID, SCAN_INTERVAL, ) from homeassistant.components.geo_location import ATTR_SOURCE from homeassistant.const import ( ATTR_FRIENDLY_NAME, ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_UNIT_OF_MEASUREMENT, CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_URL, EVENT_HOMEASSISTANT_START, LENGTH_KILOMETERS, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component import homeassistant.util.dt as dt_util from tests.async_mock import MagicMock, call, patch from tests.common import assert_setup_component, async_fire_time_changed URL = "http://geo.json.local/geo_json_events.json" CONFIG = { geo_location.DOMAIN: [ {"platform": "geo_json_events", CONF_URL: URL, CONF_RADIUS: 200} ] } CONFIG_WITH_CUSTOM_LOCATION = { geo_location.DOMAIN: [ { "platform": "geo_json_events", CONF_URL: URL, CONF_RADIUS: 200, CONF_LATITUDE: 15.1, CONF_LONGITUDE: 25.2, } ] } def _generate_mock_feed_entry(external_id, title, distance_to_home, coordinates): """Construct a mock feed entry for testing purposes.""" feed_entry = MagicMock() feed_entry.external_id = external_id feed_entry.title = title feed_entry.distance_to_home = distance_to_home feed_entry.coordinates = coordinates return feed_entry async def test_setup(hass): """Test the general setup of the platform.""" # Set up some mock feed entries for this test. mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 15.5, (-31.0, 150.0)) mock_entry_2 = _generate_mock_feed_entry("2345", "Title 2", 20.5, (-31.1, 150.1)) mock_entry_3 = _generate_mock_feed_entry("3456", "Title 3", 25.5, (-31.2, 150.2)) mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (-31.3, 150.3)) # Patching 'utcnow' to gain more control over the timed update. utcnow = dt_util.utcnow() with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch( "geojson_client.generic_feed.GenericFeed" ) as mock_feed: mock_feed.return_value.update.return_value = ( "OK", [mock_entry_1, mock_entry_2, mock_entry_3], ) with assert_setup_component(1, geo_location.DOMAIN): assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG) await hass.async_block_till_done() # Artificially trigger update. hass.bus.async_fire(EVENT_HOMEASSISTANT_START) # Collect events. 
await hass.async_block_till_done() all_states = hass.states.async_all() assert len(all_states) == 3 state = hass.states.get("geo_location.title_1") assert state is not None assert state.name == "Title 1" assert state.attributes == { ATTR_EXTERNAL_ID: "1234", ATTR_LATITUDE: -31.0, ATTR_LONGITUDE: 150.0, ATTR_FRIENDLY_NAME: "Title 1", ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS, ATTR_SOURCE: "geo_json_events", } assert round(abs(float(state.state) - 15.5), 7) == 0 state = hass.states.get("geo_location.title_2") assert state is not None assert state.name == "Title 2" assert state.attributes == { ATTR_EXTERNAL_ID: "2345", ATTR_LATITUDE: -31.1, ATTR_LONGITUDE: 150.1, ATTR_FRIENDLY_NAME: "Title 2", ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS, ATTR_SOURCE: "geo_json_events", } assert round(abs(float(state.state) - 20.5), 7) == 0 state = hass.states.get("geo_location.title_3") assert state is not None assert state.name == "Title 3" assert state.attributes == { ATTR_EXTERNAL_ID: "3456", ATTR_LATITUDE: -31.2, ATTR_LONGITUDE: 150.2, ATTR_FRIENDLY_NAME: "Title 3", ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS, ATTR_SOURCE: "geo_json_events", } assert round(abs(float(state.state) - 25.5), 7) == 0 # Simulate an update - one existing, one new entry, # one outdated entry mock_feed.return_value.update.return_value = ( "OK", [mock_entry_1, mock_entry_4, mock_entry_3], ) async_fire_time_changed(hass, utcnow + SCAN_INTERVAL) await hass.async_block_till_done() all_states = hass.states.async_all() assert len(all_states) == 3 # Simulate an update - empty data, but successful update, # so no changes to entities. mock_feed.return_value.update.return_value = "OK_NO_DATA", None async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL) await hass.async_block_till_done() all_states = hass.states.async_all() assert len(all_states) == 3 # Simulate an update - empty data, removes all entities mock_feed.return_value.update.return_value = "ERROR", None async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL) await hass.async_block_till_done() all_states = hass.states.async_all() assert len(all_states) == 0 async def
(hass): """Test the setup with a custom location.""" # Set up some mock feed entries for this test. mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 2000.5, (-31.1, 150.1)) with patch("geojson_client.generic_feed.GenericFeed") as mock_feed: mock_feed.return_value.update.return_value = "OK", [mock_entry_1] with assert_setup_component(1, geo_location.DOMAIN): assert await async_setup_component( hass, geo_location.DOMAIN, CONFIG_WITH_CUSTOM_LOCATION ) await hass.async_block_till_done() # Artificially trigger update. hass.bus.async_fire(EVENT_HOMEASSISTANT_START) # Collect events. await hass.async_block_till_done() all_states = hass.states.async_all() assert len(all_states) == 1 assert mock_feed.call_args == call((15.1, 25.2), URL, filter_radius=200.0) async def test_setup_race_condition(hass): """Test a particular race condition experienced.""" # 1. Feed returns 1 entry -> Feed manager creates 1 entity. # 2. Feed returns error -> Feed manager removes 1 entity. # However, this stayed on and kept listening for dispatcher signals. # 3. Feed returns 1 entry -> Feed manager creates 1 entity. # 4. Feed returns 1 entry -> Feed manager updates 1 entity. # Internally, the previous entity is updating itself, too. # 5. Feed returns error -> Feed manager removes 1 entity. # There are now 2 entities trying to remove themselves from HA, but # the second attempt fails of course. # Set up some mock feed entries for this test. mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 15.5, (-31.0, 150.0)) delete_signal = "geo_json_events_delete_1234" update_signal = "geo_json_events_update_1234" # Patching 'utcnow' to gain more control over the timed update. utcnow = dt_util.utcnow() with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch( "geojson_client.generic_feed.GenericFeed" ) as mock_feed: with assert_setup_component(1, geo_location.DOMAIN): assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG) await hass.async_block_till_done() mock_feed.return_value.update.return_value = "OK", [mock_entry_1] # Artificially trigger update. hass.bus.async_fire(EVENT_HOMEASSISTANT_START) # Collect events. 
await hass.async_block_till_done() all_states = hass.states.async_all() assert len(all_states) == 1 assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1 assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1 # Simulate an update - empty data, removes all entities mock_feed.return_value.update.return_value = "ERROR", None async_fire_time_changed(hass, utcnow + SCAN_INTERVAL) await hass.async_block_till_done() all_states = hass.states.async_all() assert len(all_states) == 0 assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 0 assert len(hass.data[DATA_DISPATCHER][update_signal]) == 0 # Simulate an update - 1 entry mock_feed.return_value.update.return_value = "OK", [mock_entry_1] async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL) await hass.async_block_till_done() all_states = hass.states.async_all() assert len(all_states) == 1 assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1 assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1 # Simulate an update - 1 entry mock_feed.return_value.update.return_value = "OK", [mock_entry_1] async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL) await hass.async_block_till_done() all_states = hass.states.async_all() assert len(all_states) == 1 assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1 assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1 # Simulate an update - empty data, removes all entities mock_feed.return_value.update.return_value = "ERROR", None async_fire_time_changed(hass, utcnow + 4 * SCAN_INTERVAL) await hass.async_block_till_done() all_states = hass.states.async_all() assert len(all_states) == 0 # Ensure that delete and update signal targets are now empty. assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 0 assert len(hass.data[DATA_DISPATCHER][update_signal]) == 0
test_setup_with_custom_location
test_messageformatter.ts
import * as Chai from "chai"; import { BifrostProtocol } from "../src/bifrost/Protocol"; import { MessageFormatter } from "../src/MessageFormatter"; import { dummyProtocol } from "./mocks/dummyprotocol"; const expect = Chai.expect; const XMPP = new BifrostProtocol({ id: "prpl-jabber", name: "XMPP", homepage: undefined, summary: undefined, }); const intent = { getClient: () => { return { uploadContent: (content) => { return Promise.resolve("mxc://abc/def"); }, getMediaConfig: () => { return Promise.resolve({"m.upload.size": 1024}); }, }; }, }; describe("MessageFormatter", () => { describe("matrixEventToBody", () => { it("should transform a plain text message to a basic body", () => { const msg = MessageFormatter.matrixEventToBody({ sender: "@foo:bar", event_id: "$event:bar", content: { body: "This is some plaintext!", msgtype: "m.text", }, type: "m.room.message", origin_server_ts: 0, room_id: "!roomid:bar", }, { domain: "bar", homeserverUrl: "http://bar", userPrefix: "_xmpp", }); expect(msg).to.deep.eq({ body: "This is some plaintext!", formatted: [], id: "$event:bar", }); }); it("should transform a formatted message", () => { const msg = MessageFormatter.matrixEventToBody({ sender: "@foo:bar", event_id: "$event:bar", content: { body: "This is some plaintext!", formatted_body: "<em>This</em> is some <b>plaintext</b>!", format: "org.matrix.custom.html", msgtype: "m.text", }, type: "m.room.message", origin_server_ts: 0, room_id: "!roomid:bar", }, { domain: "bar", homeserverUrl: "http://bar", userPrefix: "_xmpp", }); expect(msg).to.deep.eq({ body: "This is some plaintext!", formatted: [{ type: "html", body: "<em>This</em> is some <b>plaintext</b>!", }], id: "$event:bar", }); }); it("should transform an info-less media event", () => { const msg = MessageFormatter.matrixEventToBody({ sender: "@foo:bar", event_id: "$event:bar", content: { body: "image.jpg", url: "mxc://bar/foosdsd", msgtype: "m.image", }, type: "m.room.message", origin_server_ts: 0, room_id: "!roomid:bar", }, { domain: "bar", homeserverUrl: "http://bar", userPrefix: "_xmpp", }); expect(msg).to.deep.eq({ body: "image.jpg", opts: { attachments: [ { mimetype: undefined, size: undefined, uri: "http://bar/_matrix/media/v1/download/bar/foosdsd", }, ], }, id: "$event:bar", }); }); it("should transform a media event", () => { const msg = MessageFormatter.matrixEventToBody({ sender: "@foo:bar", event_id: "$event:bar", content: { body: "image.jpg", url: "mxc://bar/foosdsd", msgtype: "m.image", info: { mimetype: "image/jpeg", size: 1000, }, }, type: "m.room.message", origin_server_ts: 0, room_id: "!roomid:bar", }, { domain: "bar", homeserverUrl: "http://bar", userPrefix: "_xmpp", }); expect(msg).to.deep.eq({ body: "image.jpg", opts: { attachments: [ { mimetype: "image/jpeg", size: 1000, uri: "http://bar/_matrix/media/v1/download/bar/foosdsd", }, ], }, id: "$event:bar", }); }); it("should transform a emote message to a basic body", () => { const msg = MessageFormatter.matrixEventToBody({ sender: "@foo:bar", event_id: "$event:bar", content: { body: "pets the dog", msgtype: "m.emote", }, type: "m.room.message", origin_server_ts: 0, room_id: "!roomid:bar", }, { domain: "bar", homeserverUrl: "http://bar", userPrefix: "_xmpp", }); expect(msg).to.deep.eq({ body: "/me pets the dog", formatted: [], id: "$event:bar", }); }); }); describe("messageToMatrixEvent", async () => { it("should transform an ordinary message to plaintext", async () => { const contents = await MessageFormatter.messageToMatrixEvent( {body: "This is an ordinary message"}, 
dummyProtocol); expect( contents, ).to.deep.equal({ msgtype: "m.text", body: "This is an ordinary message", }); }); it("should transfer a id to the matrix message", async () => { const contents = await MessageFormatter.messageToMatrixEvent( { body: "This is an ordinary message", id: "foobarID", }, dummyProtocol); expect( contents, ).to.deep.equal({ msgtype: "m.text", remote_id: "foobarID", body: "This is an ordinary message", }); }); it("should transform an /me to m.emote", async () => { const contents = await MessageFormatter.messageToMatrixEvent( {body: "/me wags tail"}, dummyProtocol); expect( contents, ).to.deep.equal({ msgtype: "m.emote", body: "wags tail", }); }); it("should transform a html message", async () => { const contents = await MessageFormatter.messageToMatrixEvent( { body: "wags tail", formatted: [ { type: "html", body: "<body><span>Hi</span></body>", }, ], }, dummyProtocol); expect( contents, ).to.deep.equal({ msgtype: "m.text", body: "wags tail", format: "org.matrix.custom.html",
const contents = await MessageFormatter.messageToMatrixEvent( {body: "awoo", opts: { attachments: [{uri: "fake://thing"}], }}, dummyProtocol, intent); expect( contents, ).to.deep.equal({ msgtype: "m.text", body: "awoo", }); }); it("should handle an attachment using http", async () => { const contents = await MessageFormatter.messageToMatrixEvent( {body: "awoo", opts: { attachments: [{uri: "https://matrix.org/blog/wp-content/uploads/2015/01/logo1.png"}], }}, dummyProtocol, intent); expect( contents, ).to.deep.equal({ msgtype: "m.image", filename: "logo1.png", url: "mxc://abc/def", info: { mimetype: "image/png", size: 2239, }, body: "awoo", }); }); it("prpl-jabber: should transform an ordinary message to plaintext", async () => { const contents = await MessageFormatter.messageToMatrixEvent({body: "This is an ordinary message"}, XMPP); expect( contents, ).to.deep.equal({ msgtype: "m.text", body: "This is an ordinary message", }); }); it("prpl-jabber: should transform arrow bracketed plaintext to plaintext", async () => { const contents = await MessageFormatter.messageToMatrixEvent({body: "<This is an ordinary message"}, XMPP); expect( contents, ).to.deep.equal({ msgtype: "m.text", body: "<This is an ordinary message", }); }); it("prpl-jabber: should transform an HTML message to Matrix HTML", async () => { const contents = await MessageFormatter.messageToMatrixEvent({ body: `<html xmlns='http://jabber.org/protocol/xhtml-im'> <body xmlns='http://www.w3.org/1999/xhtml'> <p> <span style='font-family: Helvetica; font-size: x-large;'>hello halfshot!</span> </p> </body> </html>`}, XMPP, ); expect( contents, ).to.deep.equal({ msgtype: "m.text", format: "org.matrix.custom.html", formatted_body: "<p><span style='font-family: Helvetica; font-size: x-large;'>hello halfshot!<\\span><\\p>", body: "## hello halfshot!", }); }); it("should transform an edited message", async () => { const contents = await MessageFormatter.messageToMatrixEvent( { body: "This is an edited message", original_message: "This is the original message", }, dummyProtocol); expect( contents, ).to.deep.equal({ "msgtype": "m.text", "body": " * This is an edited message", "format": undefined, "formatted_body": undefined, "m.new_content": { "body": "This is an edited message", "m.relates_to": { event_id: "This is the original message", rel_type: "m.replace", }, "msgtype": "m.text", }, }); }); }); });
formatted_body: "<body><span>Hi</span></body>", }); }); it("should ignore an attachment without http", async () => {
.prettierrc.js
module.exports = { singleQuote: true, printWidth: 360, bracketSpacing: true,
arrowParens: "always", useTabs: false, trailingComma: "none", tabWidth: 2 };
andforensics_connector.py
# -*- coding: utf-8 -*- """module for android forensics.""" import os import io import subprocess import sqlite3 from datetime import datetime from modules import logger from modules import manager from modules import interface class AndForensicsConnector(interface.ModuleConnector): NAME = 'andforensics_connector' DESCRIPTION = 'Module for android' TABLE_NAME = 'lv1_os_android_andforensics' _plugin_classes = {} def __init__(self): super(AndForensicsConnector, self).__init__() def Connect(self, par_id, configuration, source_path_spec, knowledge_base): """Connector to connect to AndForensics. Args: par_id: partition id. configuration: configuration values. source_path_spec (dfvfs.PathSpec): path specification of the source file. knowledge_base (KnowledgeBase): knowledge base. """ # the image needs to be copied over for andforensics if os.path.exists(configuration.source_path): cmd = 'python3.6 /home/byeongchan/modules/andForensics/andForensics.py -i \'{0:s}\' -o \'{1:s}\' ' \ '-proc {2:d}'.format(os.path.dirname(configuration.source_path), configuration.tmp_path + os.sep + 'andForensics', 10) proc = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE) ret_code = proc.stdout.read() f = io.StringIO(str(ret_code)) result_msg = f.readline() print(result_msg) f.close() if result_msg[-14:-3] == 'Process End': base_name = os.path.
logger.info('') def mask_table(self, configuration, table_name): if table_name is 'call_history': query = "update lv1_os_and_call_history set timestamp = regexp_replace(timestamp, " \ "'(\\\\d{2,3}-)\\\\d{1,2}(\\\\d{2}-)\\\\d{2}(\\\\d{2})', " \ "'\\\\1**\\\\2**\\\\3');" configuration.cursor.execute_query(query) query = "update lv1_os_and_call_history set phonenumber = regexp_replace(phonenumber, " \ "'((?:(?:0|\\\\+82)(?:10|2|3[1-3]|4[1-4]|5[0-5]|6[1-4]|70)-?)\\\\d{1,2})\\\\d{2}(-?)\\\\d{2}(\\\\d{2})', " \ "'\\\\1**\\\\2**\\\\3')" configuration.cursor.execute_query(query) query = "update lv1_os_and_call_history set file = regexp_replace(file, " \ "'(통화 녹음 )([가-힣]|(?:\\\\d{6}))(?:\\\\s|\\\\S)*(_\\\\d{6}_\\\\d{6})', " \ "'\\\\1\\\\2*\\\\3')" configuration.cursor.execute_query(query) query = "update lv1_os_and_call_history SET contents = if(CHAR_LENGTH(contents)-CHAR_LENGTH(REPLACE(contents,'|',''))=2," \ " CONCAT_WS('|'," \ " REGEXP_REPLACE(SUBSTRING_INDEX(contents, '|', 1)," \ " '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(contact_name:string_num\\\\))', '\\\\1*\\\\2')," \ " REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 2), '|', -1)," \ " '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(contact_name:string\\\\))', '\\\\1*\\\\2')," \ " REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 3), '|', -1)," \ " '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(contact_name:string_num_mixed\\\\))', '\\\\1*\\\\2')" \ " )," \ " CONCAT_WS('|'," \ " SUBSTRING_INDEX(contents, '|', 1)," \ " REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 2), '|', -1)," \ " '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(name:string\\\\))', '\\\\1*\\\\2')," \ " REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 3), '|', -1)," \ " '(^(?:\\\\S|\\\\s){2})(?:\\\\S|\\\\s)*(\\\\(m_subject:string_num\\\\))', '\\\\1*\\\\2')," \ " REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 4), '|', -1)," \ " '(^(?:\\\\S|\\\\s){2})(?:\\\\S|\\\\s)*(\\\\(m_subject:string\\\\))', '\\\\1*\\\\2')," \ " REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 5), '|', -1)," \ " '(^(?:\\\\S|\\\\s){2})(?:\\\\S|\\\\s)*(\\\\(m_subject:string_num_mixed\\\\))', '\\\\1*\\\\2')," \ " REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 6), '|', -1)," \ " '(^(?:\\\\S|\\\\s){3})(?:\\\\S|\\\\s)*(\\\\(m_content:string_num_mixed\\\\))', '\\\\1*\\\\2')," \ " REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 7), '|', -1)," \ " '(^(?:\\\\S|\\\\s){3})(?:\\\\S|\\\\s)*(\\\\(m_content:string_num\\\\))', '\\\\1*\\\\2')," \ " REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 8), '|', -1)," \ " '(^(?:\\\\S|\\\\s){3})(?:\\\\S|\\\\s)*(\\\\(m_content:string\\\\))', '\\\\1*\\\\2')," \ " REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 9), '|', -1)," \ " '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(cnap_name:string_num_mixed\\\\))', '\\\\1*\\\\2')," \ " REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 10), '|', -1)," \ " '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(cnap_name:string\\\\))', '\\\\1*\\\\2')" \ " )" \ ")" configuration.cursor.execute_query(query) manager.ModulesManager.RegisterModule(AndForensicsConnector) def _convert_timestamp(timestamp): if timestamp is None: return 'N/A' if isinstance(timestamp, tuple): to_timestamp = [] for t in timestamp: to_timestamp.append(datetime.fromtimestamp(t).strftime('%Y-%m-%dT%H:%M:%SZ')) return tuple(to_timestamp) else: return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%dT%H:%M:%SZ')
basename(configuration.source_path) output_path = configuration.tmp_path + os.sep + 'andForensics' + os.sep \ + os.path.basename(configuration.source_path) analysis_db_path = output_path + os.sep + 'analysis_' + base_name + '.db' load_db_path = output_path + os.sep + 'loaddb_' + base_name + '.db' preprocess_db_path = output_path + os.sep + 'preprocess_' + base_name + '.db' this_file_path = os.path.dirname( os.path.abspath(__file__)) + os.sep + 'schema' + os.sep + 'android' + os.sep yaml_list = [this_file_path + 'lv1_os_and_app_list.yaml', this_file_path + 'lv1_os_and_call_history.yaml', this_file_path + 'lv1_os_and_emb_file.yaml', this_file_path + 'lv1_os_and_file_history.yaml', this_file_path + 'lv1_os_and_geodata.yaml', this_file_path + 'lv1_os_and_id_pw_hash.yaml', this_file_path + 'lv1_os_and_web_browser_history.yaml'] old_table_list = ['application_list', 'call_history', 'embedded_file', 'file_history', 'geodata', 'id_password_hash', 'web_browser_history'] new_table_list = ['lv1_os_and_app_list', 'lv1_os_and_call_history', 'lv1_os_and_emb_file', 'lv1_os_and_file_history', 'lv1_os_and_geodata', 'lv1_os_and_id_pw_hash', 'lv1_os_and_web_browser_history'] if not self.check_table_from_yaml(configuration, yaml_list, new_table_list): return False info = tuple([par_id, configuration.case_id, configuration.evidence_id]) try: conn = sqlite3.connect(analysis_db_path) cursor = conn.cursor() for idx, table in enumerate(old_table_list): cursor.execute(f'select * from {table}') rows = cursor.fetchall() rows_list = [] for row in rows: if table is 'application_list': row = row[:5] + _convert_timestamp(row[5:13]) + row[13:] rows_list.append(info + row) print(rows_list) query = "" if table is 'application_list': query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, " \ f"%s, %s, %s, %s, %s, %s, %s, %s, %s);" if table is 'call_history': query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, " \ f"%s, %s, %s)" elif table is 'embedded_file': query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, " \ f"%s, %s, %s, %s, %s)" elif table is 'file_history' or table is 'id_password_hash': query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)" elif table is 'geodata': query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s)" elif table is 'web_browser_history': query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, " \ f"%s, %s, %s)" configuration.cursor.bulk_execute(query, rows_list) self.mask_table(configuration, 'call_history') except Exception as exception: logger.error('Database error : {0!s}'.format(exception)) finally: conn.close() else:
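The mask_table queries above use MySQL REGEXP_REPLACE to partially mask timestamps, phone numbers, recording file names and contact fields. As a rough illustration of the phone-number rule only, here is a small Python sketch; the function name is made up for illustration and is not part of the connector:

import re

def mask_phonenumber(number):
    # Same idea as the phonenumber regexp_replace above: keep the leading digits
    # and the final two digits, replacing the four digits in between with asterisks.
    pattern = r'((?:(?:0|\+82)(?:10|2|3[1-3]|4[1-4]|5[0-5]|6[1-4]|70)-?)\d{1,2})\d{2}(-?)\d{2}(\d{2})'
    return re.sub(pattern, r'\1**\2**\3', number)

print(mask_phonenumber('010-1234-5678'))  # -> 010-12**-**78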
SimpleLinear-conditional.reconstructed.py
import psyneulink as pnl comp = pnl.Composition(name="comp") A = pnl.TransferMechanism( name="A", function=pnl.Linear(default_variable=[[0]]), termination_measure=pnl.Distance( metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]] ), ) B = pnl.TransferMechanism( name="B",
termination_measure=pnl.Distance( metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]] ), ) C = pnl.TransferMechanism( name="C", function=pnl.Linear(default_variable=[[0]]), termination_measure=pnl.Distance( metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]] ), ) comp.add_node(A) comp.add_node(B) comp.add_node(C) comp.add_projection( projection=pnl.MappingProjection( name="MappingProjection from A[RESULT] to B[InputPort-0]", function=pnl.LinearMatrix(matrix=[[1.0]]), ), sender=A, receiver=B, ) comp.add_projection( projection=pnl.MappingProjection( name="MappingProjection from B[RESULT] to C[InputPort-0]", function=pnl.LinearMatrix(matrix=[[1.0]]), ), sender=B, receiver=C, ) comp.scheduler.add_condition(A, pnl.AtNCalls(A, 0)) comp.scheduler.add_condition(B, pnl.Always()) comp.scheduler.add_condition(C, pnl.EveryNCalls(B, 5)) comp.scheduler.termination_conds = { pnl.TimeScale.RUN: pnl.Never(), pnl.TimeScale.TRIAL: pnl.AllHaveRun(), }
function=pnl.Linear(default_variable=[[0]]),
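For context, a minimal usage sketch of the reconstructed composition above; the input value and trial count are arbitrary assumptions, not part of the generated script. With the conditions shown, A is gated by AtNCalls(A, 0), B by Always(), and C by EveryNCalls(B, 5), so C is scheduled only after every fifth execution of B.

# Hypothetical usage, assuming the script above has been executed:
results = comp.run(inputs={A: [[1.0]]}, num_trials=1)
print(results)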
image.rs
//! Image API: creating, manipulating and pushing docker images use futures_core::Stream; use futures_util::{stream, stream::StreamExt}; use http::header::CONTENT_TYPE; use http::request::Builder; use hyper::{body::Bytes, Body, Method}; use serde::Serialize; use super::Docker; use crate::auth::{base64_url_encode, DockerCredentials}; use crate::container::Config; use crate::errors::Error; use crate::models::*; use std::cmp::Eq; use std::collections::HashMap; use std::hash::Hash; /// Parameters available for pulling an image, used in the [Create Image /// API](Docker::create_image) /// /// ## Examples /// /// ```rust /// use bollard::image::CreateImageOptions; /// /// use std::default::Default; /// /// CreateImageOptions{ /// from_image: "hello-world", /// ..Default::default() /// }; /// ``` /// /// ```rust /// # use bollard::image::CreateImageOptions; /// # use std::default::Default; /// CreateImageOptions::<String>{ /// ..Default::default() /// }; /// ``` #[derive(Debug, Clone, Default, Serialize)] #[serde(rename_all = "camelCase")] pub struct CreateImageOptions<T> where T: Into<String> + Serialize, { /// Name of the image to pull. The name may include a tag or digest. This parameter may only be /// used when pulling an image. The pull is cancelled if the HTTP connection is closed. pub from_image: T, /// Source to import. The value may be a URL from which the image can be retrieved or `-` to /// read the image from the request body. This parameter may only be used when importing an /// image. pub from_src: T, /// Repository name given to an image when it is imported. The repo may include a tag. This /// parameter may only be used when importing an image. pub repo: T, /// Tag or digest. If empty when pulling an image, this causes all tags for the given image to /// be pulled. pub tag: T, /// Platform in the format `os[/arch[/variant]]` pub platform: T, } /// Parameters to the [List Images /// API](Docker::list_images()) /// /// ## Examples /// /// ```rust /// use bollard::image::ListImagesOptions; /// /// use std::collections::HashMap; /// use std::default::Default; /// /// let mut filters = HashMap::new(); /// filters.insert("dangling", vec!["true"]); /// /// ListImagesOptions{ /// all: true, /// filters, /// ..Default::default() /// }; /// ``` /// /// ```rust /// # use bollard::image::ListImagesOptions; /// # use std::default::Default; /// ListImagesOptions::<String>{ /// ..Default::default() /// }; /// ``` /// #[derive(Debug, Clone, Default, Serialize)] pub struct ListImagesOptions<T> where T: Into<String> + Eq + Hash + Serialize, { /// Show all images. Only images from a final layer (no children) are shown by default. pub all: bool, /// A JSON encoded value of the filters to process on the images list. Available filters: /// - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) /// - `dangling`=`true` /// - `label`=`key` or `label`=`"key=value"` of an image label /// - `reference`=(`<image-name>[:<tag>]`) /// - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) #[serde(serialize_with = "crate::docker::serialize_as_json")] pub filters: HashMap<T, Vec<T>>, /// Show digest information as a RepoDigests field on each image. 
pub digests: bool, } /// Parameters to the [Prune Images API](Docker::prune_images()) /// /// ## Examples /// /// ```rust /// use bollard::image::PruneImagesOptions; /// /// use std::collections::HashMap; /// /// let mut filters = HashMap::new(); /// filters.insert("until", vec!["10m"]); /// /// PruneImagesOptions{ /// filters, /// }; /// ``` /// /// ```rust /// # use bollard::image::PruneImagesOptions; /// # use std::default::Default; /// PruneImagesOptions::<String>{ /// ..Default::default() /// }; /// ``` /// #[derive(Debug, Clone, Default, Serialize)] pub struct PruneImagesOptions<T> where T: Into<String> + Eq + Hash + Serialize, { /// Filters to process on the prune list, encoded as JSON. Available filters: /// - `dangling=<boolean>` When set to `true` (or `1`), prune only unused *and* untagged /// images. When set to `false` (or `0`), all unused images are pruned. /// - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be /// Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) /// computed relative to the daemon machine’s time. /// - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or /// `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the /// specified labels. #[serde(serialize_with = "crate::docker::serialize_as_json")] pub filters: HashMap<T, Vec<T>>, } /// Parameters to the [Search Images API](Docker::search_images()) /// /// ## Example /// /// ```rust /// use bollard::image::SearchImagesOptions; /// use std::default::Default; /// use std::collections::HashMap; /// /// let mut filters = HashMap::new(); /// filters.insert("until", vec!["10m"]); /// /// SearchImagesOptions { /// term: "hello-world", /// filters, /// ..Default::default() /// }; /// ``` /// /// ```rust /// # use bollard::image::SearchImagesOptions; /// # use std::default::Default; /// SearchImagesOptions::<String> { /// ..Default::default() /// }; /// ``` #[derive(Debug, Clone, Default, Serialize)] pub struct SearchImagesOptions<T> where T: Into<String> + Eq + Hash + Serialize, { /// Term to search (required) pub term: T, /// Maximum number of results to return pub limit: Option<u64>, /// A JSON encoded value of the filters to process on the images list. Available filters: /// - `is-automated=(true|false)` /// - `is-official=(true|false)` /// - `stars=<number>` Matches images that has at least 'number' stars. #[serde(serialize_with = "crate::docker::serialize_as_json")] pub filters: HashMap<T, Vec<T>>, } /// Parameters to the [Remove Image API](Docker::remove_image()) /// /// ## Examples /// /// ```rust /// use bollard::image::RemoveImageOptions; /// use std::default::Default; /// /// RemoveImageOptions { /// force: true, /// ..Default::default() /// }; /// ``` #[derive(Debug, Copy, Clone, Default, Serialize)] pub struct RemoveImageOptions { /// Remove the image even if it is being used by stopped containers or has other tags. pub force: bool, /// Do not delete untagged parent images. 
pub noprune: bool, } /// Parameters to the [Tag Image API](Docker::tag_image()) /// /// ## Examples /// /// ```rust /// use bollard::image::TagImageOptions; /// use std::default::Default; /// /// let tag_options = TagImageOptions { /// tag: "v1.0.1", /// ..Default::default() /// }; /// ``` /// /// ```rust /// # use bollard::image::TagImageOptions; /// # use std::default::Default; /// let tag_options = TagImageOptions::<String> { /// ..Default::default() /// }; /// ``` #[derive(Debug, Clone, Default, Serialize)] pub struct TagImageOptions<T> where T: Into<String> + Serialize, { /// The repository to tag in. For example, `someuser/someimage`. pub repo: T, /// The name of the new tag. pub tag: T, } /// Parameters to the [Push Image API](Docker::push_image()) /// /// ## Examples /// /// ```rust /// use bollard::image::PushImageOptions; /// /// PushImageOptions { /// tag: "v1.0.1", /// }; /// ``` /// /// ``` /// # use bollard::image::PushImageOptions; /// # use std::default::Default; /// PushImageOptions::<String> { /// ..Default::default() /// }; /// ``` #[derive(Debug, Clone, Default, Serialize)] pub struct PushImageOptions<T> where T: Into<String> + Serialize, { /// The tag to associate with the image on the registry. pub tag: T, } /// Parameters to the [Commit Container API](Docker::commit_container()) /// /// ## Examples /// /// ```rust /// use bollard::image::CommitContainerOptions; /// /// CommitContainerOptions { /// container: "my-running-container", /// pause: true, /// ..Default::default() /// }; /// ``` /// /// ``` /// # use bollard::image::CommitContainerOptions; /// # use std::default::Default; /// CommitContainerOptions::<String> { /// ..Default::default() /// }; /// ``` #[derive(Debug, Clone, Default, Serialize)] pub struct CommitContainerOptions<T> where T: Into<String> + Serialize, { /// The ID or name of the container to commit. pub container: T, /// Repository name for the created image. pub repo: T, /// Tag name for the create image. pub tag: T, /// Commit message. pub comment: T, /// Author of the image. pub author: T, /// Whether to pause the container before committing. pub pause: bool, /// `Dockerfile` instructions to apply while committing pub changes: Option<T>, } /// Parameters to the [Build Image API](Docker::build_image()) /// /// ## Examples /// /// ```rust /// use bollard::image::BuildImageOptions; /// /// BuildImageOptions { /// dockerfile: "Dockerfile", /// t: "my-image", /// ..Default::default() /// }; /// ``` /// /// ``` /// # use bollard::image::BuildImageOptions; /// # use std::default::Default; /// BuildImageOptions::<String> { /// ..Default::default() /// }; /// ``` #[derive(Debug, Clone, Default, Serialize)] pub struct BuildImageOptions<T> where T: Into<String> + Eq + Hash + Serialize, { /// Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and /// points to an external `Dockerfile`. pub dockerfile: T, /// A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag /// the default `latest` value is assumed. You can provide several `t` parameters. pub t: T, /// Extra hosts to add to `/etc/hosts`. pub extrahosts: Option<T>, /// A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, /// the file’s contents are placed into a file called `Dockerfile` and the image is built from /// that file. If the URI points to a tarball, the file is downloaded by the daemon and the /// contents therein used as the context for the build. 
If the URI points to a tarball and the /// `dockerfile` parameter is also specified, there must be a file with the corresponding path /// inside the tarball. pub remote: T, /// Suppress verbose build output. pub q: bool, /// Do not use the cache when building the image. pub nocache: bool, /// JSON array of images used for build cache resolution. #[serde(serialize_with = "crate::docker::serialize_as_json")] pub cachefrom: Vec<T>, /// Attempt to pull the image even if an older image exists locally. pub pull: bool, /// Remove intermediate containers after a successful build. pub rm: bool, /// Always remove intermediate containers, even upon failure. pub forcerm: bool, /// Set memory limit for build. pub memory: Option<u64>, /// Total memory (memory + swap). Set as `-1` to disable swap. pub memswap: Option<i64>, /// CPU shares (relative weight). pub cpushares: Option<u64>, /// CPUs in which to allow execution (e.g., `0-3`, `0,1`). pub cpusetcpus: T, /// The length of a CPU period in microseconds. pub cpuperiod: Option<u64>, /// Microseconds of CPU time that the container can get in a CPU period. pub cpuquota: Option<u64>, /// JSON map of string pairs for build-time variables. Users pass these values at build-time. /// Docker uses the buildargs as the environment context for commands run via the `Dockerfile` /// RUN instruction, or for variable expansion in other `Dockerfile` instructions. #[serde(serialize_with = "crate::docker::serialize_as_json")] pub buildargs: HashMap<T, T>, /// Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. pub shmsize: Option<u64>, /// Squash the resulting images layers into a single layer. pub squash: bool, /// Arbitrary key/value labels to set on the image, as a JSON map of string pairs. #[serde(serialize_with = "crate::docker::serialize_as_json")] pub labels: HashMap<T, T>, /// Sets the networking mode for the run commands during build. Supported standard values are: /// `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's /// name to which this container should connect to. pub networkmode: T, /// Platform in the format `os[/arch[/variant]]` pub platform: T, } /// Parameters to the [Import Image API](Docker::import_image()) /// /// ## Examples /// /// ```rust /// use bollard::image::ImportImageOptions; /// use std::default::Default; /// /// ImportImageOptions { /// quiet: true, /// ..Default::default() /// }; /// ``` #[derive(Debug, Copy, Clone, Default, Serialize)] pub struct ImportImageOptions { /// Suppress progress details during load. pub quiet: bool, } impl Docker { /// --- /// /// # List Images /// /// Returns a list of images on the server. Note that it uses a different, smaller /// representation of an image than inspecting a single image /// /// # Arguments /// /// - An optional [List Images Options](ListImagesOptions) struct. /// /// # Returns /// /// - Vector of [API Images](ImageSummary), wrapped in a Future. 
/// /// # Examples /// /// ```rust,no_run /// # use bollard::Docker; /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// use bollard::image::ListImagesOptions; /// /// use std::collections::HashMap; /// use std::default::Default; /// /// let mut filters = HashMap::new(); /// filters.insert("dangling", vec!["true"]); /// /// let options = Some(ListImagesOptions{ /// all: true, /// filters, /// ..Default::default() /// }); /// /// docker.list_images(options); /// ``` pub async fn list_images<T>( &self, options: Option<ListImagesOptions<T>>, ) -> Result<Vec<ImageSummary>, Error> where T: Into<String> + Eq + Hash + Serialize, { let url = "/images/json"; let req = self.build_request( url, Builder::new().method(Method::GET), options, Ok(Body::empty()), ); self.process_into_value(req).await } /// --- /// /// # Create Image /// /// Create an image by either pulling it from a registry or importing it. /// /// # Arguments /// /// - An optional [Create Image Options](CreateImageOptions) struct. /// - An optional request body consisting of a tar or tar.gz archive with the root file system /// for the image. If this argument is used, the value of the `from_src` option must be "-". /// /// # Returns /// /// - [Build Info](BuildInfo), wrapped in an asynchronous /// Stream. /// /// # Examples /// /// ```rust /// # use bollard::Docker; /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// use bollard::image::CreateImageOptions; /// /// use std::default::Default; /// /// let options = Some(CreateImageOptions{ /// from_image: "hello-world", /// ..Default::default() /// }); /// /// docker.create_image(options, None, None); /// /// // do some other work while the image is pulled from the docker hub... /// ``` /// /// # Unsupported /// /// - Import from tarball /// pub fn create_image<T>( &self, options: Option<CreateImageOptions<T>>, root_fs: Option<Body>, credentials: Option<DockerCredentials>, ) -> impl Stream<Item = Result<CreateImageInfo, Error>> where T: Into<String> + Serialize, { let url = "/images/create"; match serde_json::to_string(&credentials.unwrap_or_else(|| DockerCredentials { ..Default::default() })) { Ok(ser_cred) => { let req = self.build_request( url, Builder::new() .method(Method::POST) .header("X-Registry-Auth", base64_url_encode(&ser_cred)), options, match root_fs { Some(body) => Ok(body), None => Ok(Body::empty()), }, ); self.process_into_stream(req).boxed() } Err(e) => stream::once(async move { Err(Error::from(e)) }).boxed(), } } /// --- /// /// # Inspect Image /// /// Return low-level information about an image. /// /// # Arguments /// /// - Image name as a string slice. /// /// # Returns /// /// - [Image](Image), wrapped in a Future. /// /// # Examples /// /// ```rust /// # use bollard::Docker; /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// /// use std::default::Default; /// /// docker.inspect_image("hello-world"); /// ``` pub async fn inspect_image(&self, image_name: &str) -> Result<Image, Error> { let url = format!("/images/{}/json", image_name); let req = self.build_request( &url, Builder::new().method(Method::GET), None::<String>, Ok(Body::empty()), ); self.process_into_value(req).await } /// --- /// /// # Prune Images /// /// Delete unused images. /// /// # Arguments /// /// - An optional [Prune Images Options](PruneImagesOptions) struct. /// /// # Returns /// /// - a [Prune Image Response](ImagePruneResponse), wrapped in a Future. 
/// /// # Examples /// /// ```rust /// # use bollard::Docker; /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// use bollard::image::PruneImagesOptions; /// /// use std::collections::HashMap; /// /// let mut filters = HashMap::new(); /// filters.insert("until", vec!["10m"]); /// /// let options = Some(PruneImagesOptions { /// filters /// }); /// /// docker.prune_images(options); /// ``` pub async fn prune_images<T>( &self, options: Option<PruneImagesOptions<T>>, ) -> Result<ImagePruneResponse, Error> where T: Into<String> + Eq + Hash + Serialize, { let url = "/images/prune"; let req = self.build_request( url, Builder::new().method(Method::POST), options, Ok(Body::empty()), ); self.process_into_value(req).await } /// --- /// /// # Image History /// /// Return parent layers of an image. /// /// # Arguments /// /// - Image name as a string slice. /// /// # Returns /// /// - Vector of [History Response Item](HistoryResponseItem), wrapped in a /// Future. /// /// # Examples /// /// ```rust /// # use bollard::Docker; /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// /// docker.image_history("hello-world"); /// ``` pub async fn image_history(&self, image_name: &str) -> Result<Vec<HistoryResponseItem>, Error> { let url = format!("/images/{}/history", image_name); let req = self.build_request( &url, Builder::new().method(Method::GET), None::<String>, Ok(Body::empty()), ); self.process_into_value(req).await } /// --- /// /// # Search Images /// /// Search for an image on Docker Hub. /// /// # Arguments /// /// - [Search Image Options](SearchImagesOptions) struct. /// /// # Returns /// /// - Vector of [Image Search Response Item](ImageSearchResponseItem) results, wrapped in a /// Future. /// /// # Examples /// /// ```rust /// # use bollard::Docker; /// /// use bollard::image::SearchImagesOptions; /// use std::default::Default; /// use std::collections::HashMap; /// /// let mut filters = HashMap::new(); /// filters.insert("until", vec!["10m"]); /// /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// let search_options = SearchImagesOptions { /// term: "hello-world", /// filters, /// ..Default::default() /// }; /// /// docker.search_images(search_options); /// ``` pub async fn search_images<T>( &self, options: SearchImagesOptions<T>, ) -> Result<Vec<ImageSearchResponseItem>, Error> where T: Into<String> + Eq + Hash + Serialize, { let url = "/images/search"; let req = self.build_request( url, Builder::new().method(Method::GET), Some(options), Ok(Body::empty()), ); self.process_into_value(req).await } /// --- /// /// # Remove Image /// /// Remove an image, along with any untagged parent images that were referenced by that image. /// /// # Arguments /// /// - Image name as a string slice. /// - An optional [Remove Image Options](RemoveImageOptions) struct. /// /// # Returns /// /// - Vector of [Image Delete Response Item](ImageDeleteResponseItem), wrapped in a /// Future. 
/// /// # Examples /// /// ```rust /// # use bollard::Docker; /// /// use bollard::image::RemoveImageOptions; /// use std::default::Default; /// /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// let remove_options = Some(RemoveImageOptions { /// force: true, /// ..Default::default() /// }); /// /// docker.remove_image("hello-world", remove_options, None); /// ``` pub async fn remove_image( &self, image_name: &str, options: Option<RemoveImageOptions>, credentials: Option<DockerCredentials>, ) -> Result<Vec<ImageDeleteResponseItem>, Error> { let url = format!("/images/{}", image_name); match serde_json::to_string(&credentials.unwrap_or_else(|| DockerCredentials { ..Default::default() })) { Ok(ser_cred) => { let req = self.build_request( &url, Builder::new() .method(Method::DELETE) .header("X-Registry-Auth", base64_url_encode(&ser_cred)), options, Ok(Body::empty()), ); self.process_into_value(req).await } Err(e) => Err(e.into()), } } /// --- /// /// # Tag Image /// /// Tag an image so that it becomes part of a repository. /// /// # Arguments /// /// - Image name as a string slice. /// - Optional [Tag Image Options](TagImageOptions) struct. /// /// # Returns /// /// - unit type `()`, wrapped in a Future. /// /// # Examples /// /// ```rust /// # use bollard::Docker; /// /// use bollard::image::TagImageOptions; /// use std::default::Default; /// /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// let tag_options = Some(TagImageOptions { /// tag: "v1.0.1", /// ..Default::default() /// }); /// /// docker.tag_image("hello-world", tag_options); /// ``` pub async fn tag_image<T>( &self, image_name: &str, options: Option<TagImageOptions<T>>, ) -> Result<(), Error> where T: Into<String> + Serialize, { let url = format!("/images/{}/tag", image_name); let req = self.build_request( &url, Builder::new().method(Method::POST), options, Ok(Body::empty()), ); self.process_into_unit(req).await } /// --- /// /// # Push Image /// /// Push an image to a registry. /// /// # Arguments /// /// - Image name as a string slice. /// - Optional [Push Image Options](PushImageOptions) struct. /// - Optional [Docker Credentials](DockerCredentials) struct. /// /// # Returns /// /// - unit type `()`, wrapped in a Future. 
/// /// # Examples /// /// ```rust /// # use bollard::Docker; /// /// use bollard::auth::DockerCredentials; /// use bollard::image::PushImageOptions; /// /// use std::default::Default; /// /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// let push_options = Some(PushImageOptions { /// tag: "v1.0.1", /// }); /// /// let credentials = Some(DockerCredentials { /// username: Some("Jack".to_string()), /// password: Some("myverysecretpassword".to_string()), /// ..Default::default() /// }); /// /// docker.push_image("hello-world", push_options, credentials); /// ``` pub fn push_image<T>( &self, image_name: &str, options: Option<PushImageOptions<T>>, credentials: Option<DockerCredentials>, ) -> impl Stream<Item = Result<PushImageInfo, Error>> where T: Into<String> + Serialize, { let url = format!("/images/{}/push", image_name); match serde_json::to_string(&credentials.unwrap_or_else(|| DockerCredentials { ..Default::default() })) { Ok(ser_cred) => { let req = self.build_request( &url, Builder::new() .method(Method::POST) .header(CONTENT_TYPE, "application/json") .header("X-Registry-Auth", base64_url_encode(&ser_cred)), options, Ok(Body::empty()), ); self.process_into_stream(req).boxed() } Err(e) => stream::once(async move { Err(e.into()) }).boxed(), } } /// --- /// /// # Commit Container /// /// Create a new image from a container. /// /// # Arguments /// /// - [Commit Container Options](CommitContainerOptions) struct. /// - Container [Config](Config) struct. /// /// # Returns /// /// - [Commit](Commit), wrapped in a Future. /// /// # Examples /// /// ```rust /// # use bollard::Docker; /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// use bollard::image::CommitContainerOptions; /// use bollard::container::Config; /// /// use std::default::Default; /// /// let options = CommitContainerOptions{ /// container: "my-running-container", /// pause: true, /// ..Default::default() /// }; /// /// let config = Config::<String> { /// ..Default::default() /// }; /// /// docker.commit_container(options, config); /// ``` pub async fn commit_container<T, Z>( &self, options: CommitContainerOptions<T>, config: Config<Z>, ) -> Result<Commit, Error> where T: Into<String> + Serialize, Z: Into<String> + Eq + Hash + Serialize, { let url = "/commit"; let req = self.build_request( url, Builder::new().method(Method::POST), Some(options), Docker::serialize_payload(Some(config)), ); self.process_into_value(req).await } /// --- /// /// # Build Image /// /// Build an image from a tar archive with a `Dockerfile` in it. /// /// The `Dockerfile` specifies how the image is built from the tar archive. It is typically in /// the archive's root, but can be at a different path or have a different name by specifying /// the `dockerfile` parameter. /// /// # Arguments /// /// - [Build Image Options](BuildImageOptions) struct. /// - Optional [Docker Credentials](DockerCredentials) struct. /// - Tar archive compressed with one of the following algorithms: identity (no compression), /// gzip, bzip2, xz. Optional [Hyper Body](hyper::body::Body). /// /// # Returns /// /// - [Create Image Info](CreateImageInfo), wrapped in an asynchronous /// Stream. 
/// /// # Examples /// /// ```rust,no_run /// # use bollard::Docker; /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// use bollard::image::BuildImageOptions; /// use bollard::container::Config; /// /// use std::default::Default; /// use std::fs::File; /// use std::io::Read; /// /// let options = BuildImageOptions{ /// dockerfile: "Dockerfile", /// t: "my-image", /// rm: true, /// ..Default::default() /// }; /// /// let mut file = File::open("tarball.tar.gz").unwrap(); /// let mut contents = Vec::new(); /// file.read_to_end(&mut contents).unwrap(); /// /// docker.build_image(options, None, Some(contents.into())); /// ``` pub fn build_image<T>( &self, options: BuildImageOptions<T>, credentials: Option<HashMap<String, DockerCredentials>>, tar: Option<Body>, ) -> impl Stream<Item = Result<BuildInfo, Error>> where T: Into<String> + Eq + Hash + Serialize, { let url = "/build"; match serde_json::to_string(&credentials.unwrap_or_else(HashMap::new)) { Ok(ser_cred) => { let req = self.build_request( &url, Builder::new() .method(Method::POST) .header(CONTENT_TYPE, "application/x-tar") .header("X-Registry-Config", base64_url_encode(&ser_cred)), Some(options), Ok(tar.unwrap_or_else(Body::empty)), ); self.process_into_stream(req).boxed() } Err(e) => stream::once(async move { Err(e.into()) }).boxed(), } } /// --- /// /// # Export Image /// /// Get a tarball containing all images and metadata for a repository. /// /// The root of the resulting tar file will contain the file "manifest.json". If the export is /// of an image repository, rather than a single image, there will also be a `repositories` file /// with a JSON description of the exported image repositories. /// Additionally, each layer of all exported images will have a subdirectory in the archive /// containing the filesystem of the layer. /// /// See the [Docker API documentation](https://docs.docker.com/engine/api/v1.40/#operation/ImageCommit) /// for more information. /// # Arguments /// - The `image_name` string can refer to an individual image and tag (e.g. alpine:latest), /// an individual image by ID. /// /// # Returns /// - An uncompressed TAR archive pub fn export_image(&self, image_name: &str) -> impl Stream<Item = Result<Bytes, Error>> { let url = format!("/images/{}/get", image_name); let req = self.build_request( &url, Builder::new() .method(Method::GET) .header(CONTENT_TYPE, "application/json"), None::<String>, Ok(Body::empty()), ); self.process_into_body(req) } /// --- /// /// # Import Image /// /// Load a set of images and tags into a repository. /// /// For details on the format, see the [export image /// endpoint](struct.Docker.html#method.export_image). /// /// # Arguments /// - [Image Import Options](ImportImageOptions) struct. /// /// # Returns /// /// - [Build Info](BuildInfo), wrapped in an asynchronous /// Stream.
/// /// # Examples /// /// ```rust,no_run /// # use bollard::Docker; /// # let docker = Docker::connect_with_http_defaults().unwrap(); /// use bollard::image::ImportImageOptions; /// use bollard::errors::Error; /// /// use std::default::Default; /// use futures_util::stream::StreamExt; /// use tokio::fs::File; /// use tokio::io::AsyncWriteExt; /// use tokio_util::codec; /// /// let options = ImportImageOptions{ /// ..Default::default() /// }; /// /// async move { /// let mut file = File::open("tarball.tar.gz").await.unwrap(); /// /// let byte_stream = codec::FramedRead::new(file, codec::BytesCodec::new()).map(|r| { /// let bytes = r.unwrap().freeze(); /// Ok::<_, Error>(bytes) /// }); /// let body = hyper::Body::wrap_stream(byte_stream); /// let mut stream = docker /// .import_image( /// ImportImageOptions { /// ..Default::default() /// }, /// body, /// None, /// ); /// /// while let Some(response) = stream.next().await { /// // ... /// } /// }; /// ``` pub fn import_image( &self, options: ImportImageOptions, root_fs: Body, credentials: Option<HashMap<String, DockerCredentials>>, ) -> impl Stream<Item = Result<BuildInfo, Error>> {
match serde_json::to_string(&credentials.unwrap_or_else(HashMap::new)) { Ok(ser_cred) => { let req = self.build_request( "/images/load", Builder::new() .method(Method::POST) .header(CONTENT_TYPE, "application/json") .header("X-Registry-Config", base64_url_encode(&ser_cred)), Some(options), Ok(root_fs), ); self.process_into_stream(req).boxed() } Err(e) => stream::once(async move { Err(e.into()) }).boxed(), } } }
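// A minimal usage sketch (not part of the original file): unlike the other methods above,
// `export_image` ships no doc example, so this shows one way to consume the `Bytes` stream it
// returns and write it to a tarball on disk. The image name "hello-world" and the output path
// "hello-world.tar" are arbitrary placeholder values.
async fn export_image_example() -> Result<(), Box<dyn std::error::Error>> {
    use futures_util::stream::StreamExt;
    use tokio::fs::File;
    use tokio::io::AsyncWriteExt;

    let docker = Docker::connect_with_http_defaults()?;
    // `export_image` is not an async fn; it returns the byte stream directly.
    let mut stream = docker.export_image("hello-world");
    let mut file = File::create("hello-world.tar").await?;
    while let Some(chunk) = stream.next().await {
        // Each item is a `Result<Bytes, Error>`; write the raw bytes straight through.
        file.write_all(&chunk?).await?;
    }
    Ok(())
}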
test_access.py
# -*- coding: utf-8 -*- # # Copyright (C) 2020-2021 CERN. # Copyright (C) 2020-2021 Northwestern University. # Copyright (C) 2021 TU Wien. # # Invenio-RDM-Records is free software; you can redistribute it and/or modify
"""Test metadata access schema.""" import pytest from marshmallow.exceptions import ValidationError from invenio_rdm_records.services.schemas.access import AccessSchema, \ EmbargoSchema def test_embargo_load_no_until_is_valid(): expected = { "active": False, "until": None, "reason": None } valid_no_until = { "active": False, } assert expected == EmbargoSchema().load(valid_no_until) valid_no_until = { "active": False, "until": None, } assert expected == EmbargoSchema().load(valid_no_until) def test_embargo_dump_no_until_is_valid(): valid_no_until = { "active": False, } assert valid_no_until == EmbargoSchema().dump(valid_no_until) expected = { "active": False, } valid_no_until = { "active": False, "until": None, } assert expected == EmbargoSchema().dump(valid_no_until) def test_valid_full(): valid_full = { "record": "public", "files": "restricted", "embargo": { "active": True, "until": "2120-10-06", "reason": "espionage" }, } assert valid_full == AccessSchema().load(valid_full) @pytest.mark.parametrize("invalid_access,invalid_attr", [ ({"files": "restricted", "embargo": {"active": True, "until": "2131-01-01", "reason": "secret!"}}, "record"), ({"record": "public", "embargo": {"active": True, "until": "2131-01-01", "reason": "secret!"}}, "files"), ({"record": "public", "files": "restricted", "embargo": {"active": False, "until": "2131-01-01", "reason": "secret!"}}, "embargo"), ({"record": "public", "files": "restricted", "embargo": {"active": True, "until": "1999-01-01", "reason": "secret!"}}, "embargo"), ({"record": "invalid", "files": "restricted", "embargo": {"active": False, "until": "1999-01-01", "reason": "secret!"}}, "record"), ({"record": "public", "files": "invalid", "embargo": {"active": False, "until": "1999-01-01", "reason": "secret!"}}, "files"), ]) def test_invalid(invalid_access, invalid_attr): with pytest.raises(ValidationError) as e: AccessSchema().load(invalid_access) error_fields = e.value.messages.keys() assert len(error_fields) == 1 assert invalid_attr in error_fields
# it under the terms of the MIT License; see LICENSE file for more details.
new.go
// Copyright 2016-2018, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "context" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "sort" "strings" "unicode" "github.com/pulumi/pulumi/pkg/apitype" "github.com/pulumi/pulumi/pkg/backend" "github.com/pulumi/pulumi/pkg/backend/display" "github.com/pulumi/pulumi/pkg/backend/httpstate" "github.com/pulumi/pulumi/pkg/engine" "github.com/pulumi/pulumi/pkg/resource/config" "github.com/pulumi/pulumi/pkg/tokens" "github.com/pulumi/pulumi/pkg/workspace" "github.com/pkg/errors" "github.com/pulumi/pulumi/pkg/diag/colors" "github.com/pulumi/pulumi/pkg/util/cmdutil" "github.com/pulumi/pulumi/pkg/util/contract" "github.com/pulumi/pulumi/pkg/util/logging" "github.com/spf13/cobra" survey "gopkg.in/AlecAivazis/survey.v1" surveycore "gopkg.in/AlecAivazis/survey.v1/core" ) // nolint: vetshadow, intentionally disabling here for cleaner err declaration/assignment. func newNewCmd() *cobra.Command { var configArray []string var description string var dir string var force bool var generateOnly bool var name string var offline bool var stack string var suppressOutputs bool var yes bool cmd := &cobra.Command{ Use: "new [template|url]", SuggestFor: []string{"init", "create"}, Short: "Create a new Pulumi project", Args: cmdutil.MaximumNArgs(1), Run: cmdutil.RunFunc(func(cmd *cobra.Command, args []string) error { interactive := cmdutil.Interactive() if !interactive { yes = true // auto-approve changes, since we cannot prompt. } // Prepare options. opts, err := updateFlagsToOptions(interactive, false /*skipPreview*/, yes) if err != nil { return err } opts.Display = display.Options{ Color: cmdutil.GetGlobalColorization(), SuppressOutputs: suppressOutputs, IsInteractive: interactive, } opts.Engine = engine.UpdateOptions{ Parallel: defaultParallel, } // Validate name (if specified) before further prompts/operations. if name != "" && !workspace.IsValidProjectName(name) { return errors.Errorf("'%s' is not a valid project name", name) } // Get the current working directory. cwd, err := os.Getwd() if err != nil { return errors.Wrap(err, "getting the working directory") } originalCwd := cwd // If dir was specified, ensure it exists and use it as the // current working directory. if dir != "" { // Ensure the directory exists. if err = os.MkdirAll(dir, os.ModePerm); err != nil { return errors.Wrap(err, "creating the directory") } // Change the working directory to the specified directory. if err = os.Chdir(dir); err != nil { return errors.Wrap(err, "changing the working directory") } // Get the new working directory. if cwd, err = os.Getwd(); err != nil { return errors.Wrap(err, "getting the working directory") } } // Return an error if the directory isn't empty. if !force { if err = errorIfNotEmptyDirectory(cwd); err != nil { return err } } // If we're going to be creating a stack, get the current backend, which // will kick off the login flow (if not already logged-in). 
if !generateOnly { if _, err = currentBackend(opts.Display); err != nil { return err } } templateNameOrURL := "" if len(args) > 0 { templateNameOrURL = args[0] } // Retrieve the template repo. repo, err := workspace.RetrieveTemplates(templateNameOrURL, offline) if err != nil { return err } defer func() { contract.IgnoreError(repo.Delete()) }() // List the templates from the repo. templates, err := repo.Templates() if err != nil { return err } var template workspace.Template if len(templates) == 0 { return errors.New("no templates") } else if len(templates) == 1 { template = templates[0] } else { if template, err = chooseTemplate(templates, opts.Display); err != nil { return err } } // Do a dry run, if we're not forcing files to be overwritten. if !force { if err = template.CopyTemplateFilesDryRun(cwd); err != nil { if os.IsNotExist(err) { return errors.Wrapf(err, "template '%s' not found", templateNameOrURL) } return err } } // If a stack was specified via --stack, see if it already exists. var s backend.Stack if stack != "" { existingStack, existingName, existingDesc, err := getStack(stack, opts.Display) if err != nil { return err } s = existingStack if name == "" { name = existingName } if description == "" { description = existingDesc } } // Show instructions, if we're going to show at least one prompt. hasAtLeastOnePrompt := (name == "") || (description == "") || (!generateOnly && stack == "") if !yes && hasAtLeastOnePrompt { fmt.Println("This command will walk you through creating a new Pulumi project.") fmt.Println() fmt.Println("Enter a value or leave blank to accept the default, and press <ENTER>.") fmt.Println("Press ^C at any time to quit.") } // Prompt for the project name, if it wasn't already specified. if name == "" { defaultValue := workspace.ValueOrSanitizedDefaultProjectName(name, template.ProjectName, filepath.Base(cwd)) name, err = promptForValue(yes, "project name", defaultValue, false, workspace.IsValidProjectName, opts.Display) if err != nil { return err } } // Prompt for the project description, if it wasn't already specified. if description == "" { defaultValue := workspace.ValueOrDefaultProjectDescription( description, template.ProjectDescription, template.Description) description, err = promptForValue(yes, "project description", defaultValue, false, nil, opts.Display) if err != nil { return err } } // Actually copy the files. if err = template.CopyTemplateFiles(cwd, force, name, description); err != nil { if os.IsNotExist(err) { return errors.Wrapf(err, "template '%s' not found", templateNameOrURL) } return err } fmt.Printf("Created project '%s'.\n", name) // Load the project, update the name & description, and save it. proj, _, err := readProject() if err != nil { return err } proj.Name = tokens.PackageName(name) proj.Description = &description if err = workspace.SaveProject(proj); err != nil { return errors.Wrap(err, "saving project") } // Create the stack, if needed. if !generateOnly && s == nil { if s, err = promptAndCreateStack(stack, name, true /*setCurrent*/, yes, opts.Display); err != nil { return err } // The backend will print "Created stack '<stack>'." on success. } // Prompt for config values (if needed) and save. if !generateOnly { if err = handleConfig(s, templateNameOrURL, template, configArray, yes, opts.Display); err != nil { return err } } // Install dependencies. 
if !generateOnly { if err = installDependencies("Installing dependencies..."); err != nil { return err } fmt.Println( opts.Display.Color.Colorize( colors.BrightGreen+colors.Bold+"Your new project is configured and ready to go!"+colors.Reset) + " " + cmdutil.EmojiOr("✨", "")) } // Run `up` automatically, or print out next steps to run `up` manually. if !generateOnly { if err = runUpOrPrintNextSteps(s, originalCwd, cwd, opts, yes); err != nil { return err } } if template.Quickstart != "" { fmt.Println(template.Quickstart) } return nil }), } // Add additional help that includes a list of available templates. defaultHelp := cmd.HelpFunc() cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) { // Show default help. defaultHelp(cmd, args) // Attempt to retrieve available templates. repo, err := workspace.RetrieveTemplates("", false /*offline*/) if err != nil { logging.Warningf("could not retrieve templates: %v", err) return } // Get the list of templates. templates, err := repo.Templates() if err != nil { logging.Warningf("could not list templates: %v", err) return } // If we have any templates, show them. if len(templates) > 0 { available, _ := templatesToOptionArrayAndMap(templates) fmt.Println("") fmt.Println("Available Templates:") for _, t := range available { fmt.Printf(" %s\n", t) } } }) cmd.PersistentFlags().StringArrayVarP( &configArray, "config", "c", []string{}, "Config to save") cmd.PersistentFlags().StringVarP( &description, "description", "d", "", "The project description; if not specified, a prompt will request it") cmd.PersistentFlags().StringVar( &dir, "dir", "", "The location to place the generated project; if not specified, the current directory is used") cmd.PersistentFlags().BoolVarP( &force, "force", "f", false, "Forces content to be generated even if it would change existing files") cmd.PersistentFlags().BoolVarP( &generateOnly, "generate-only", "g", false, "Generate the project only; do not create a stack, save config, or install dependencies") cmd.PersistentFlags().StringVarP( &name, "name", "n", "", "The project name; if not specified, a prompt will request it") cmd.PersistentFlags().BoolVarP( &offline, "offline", "o", false, "Use locally cached templates without making any network requests") cmd.PersistentFlags().StringVarP( &stack, "stack", "s", "", "The stack name; either an existing stack or stack to create; if not specified, a prompt will request it") cmd.PersistentFlags().BoolVar( &suppressOutputs, "suppress-outputs", false, "Suppress display of stack outputs (in case they contain sensitive values)") cmd.PersistentFlags().BoolVarP( &yes, "yes", "y", false, "Skip prompts and proceed with default values") return cmd } // errorIfNotEmptyDirectory returns an error if path is not empty. func errorIfNotEmptyDirectory(path string) error { infos, err := ioutil.ReadDir(path) if err != nil { return err } if len(infos) > 0 { return errors.Errorf("%s is not empty; "+ "rerun in an empty directory, pass the path to an empty directory to --dir, or use --force", path) } return nil } // getStack gets a stack and the project name & description, or returns nil if the stack doesn't exist. 
func getStack(stack string, opts display.Options) (backend.Stack, string, string, error) { b, err := currentBackend(opts) if err != nil { return nil, "", "", err } stackRef, err := b.ParseStackReference(stack) if err != nil { return nil, "", "", err } s, err := b.GetStack(commandContext(), stackRef) if err != nil { return nil, "", "", err } name := "" description := "" if s != nil { if cs, ok := s.(httpstate.Stack); ok { tags := cs.Tags() name = tags[apitype.ProjectNameTag] description = tags[apitype.ProjectDescriptionTag] } } return s, name, description, nil } // promptAndCreateStack creates and returns a new stack (prompting for the name as needed). func promptAndCreateStack( stack string, projectName string, setCurrent bool, yes bool, opts display.Options) (backend.Stack, error) { b, err := currentBackend(opts) if err != nil { return nil, err } if stack != "" { s, err := stackInit(b, stack, setCurrent) if err != nil { return nil, err } return s, nil } defaultValue := getDevStackName(projectName) for { stackName, err := promptForValue(yes, "stack name", defaultValue, false, nil, opts) if err != nil { return nil, err } s, err := stackInit(b, stackName, setCurrent) if err != nil { if !yes { // Let the user know about the error and loop around to try again. fmt.Printf("Sorry, could not create stack '%s': %v.\n", stackName, err) continue } return nil, err } return s, nil } } // getDevStackName returns the stack name suffixed with -dev. func getDevStackName(name string) string { const suffix = "-dev" // Strip the suffix so we don't include two -dev suffixes // if the name already has it. return strings.TrimSuffix(name, suffix) + suffix } // stackInit creates the stack. func stackInit(b backend.Backend, stackName string, setCurrent bool) (backend.Stack, error) { stackRef, err := b.ParseStackReference(stackName) if err != nil {
} return createStack(b, stackRef, nil, setCurrent) } // saveConfig saves the config for the stack. func saveConfig(stackName tokens.QName, c config.Map) error { ps, err := workspace.DetectProjectStack(stackName) if err != nil { return err } for k, v := range c { ps.Config[k] = v } return workspace.SaveProjectStack(stackName, ps) } // installDependencies will install dependencies for the project, e.g. by running // `npm install` for nodejs projects or `pip install` for python projects. func installDependencies(message string) error { proj, _, err := readProject() if err != nil { return err } // TODO[pulumi/pulumi#1307]: move to the language plugins so we don't have to hard code here. var command string var c *exec.Cmd if strings.EqualFold(proj.Runtime.Name(), "nodejs") { command = "npm install" c = exec.Command("npm", "install") } else if strings.EqualFold(proj.Runtime.Name(), "python") { command = "pip install -r requirements.txt" c = exec.Command("pip", "install", "-r", "requirements.txt") } else { return nil } if message != "" { fmt.Println(message) } // Run the command. if out, err := c.CombinedOutput(); err != nil { fmt.Fprintf(os.Stderr, "%s", out) return errors.Wrapf(err, "installing dependencies; rerun '%s' manually to try again", command) } return nil } // runUpOrPrintNextSteps runs `up` automatically, or if `up` shouldn't run, prints out a message with next steps. func runUpOrPrintNextSteps( stack backend.Stack, originalCwd string, cwd string, opts backend.UpdateOptions, yes bool) error { proj, root, err := readProject() if err != nil { return err } // Currently go projects require a build/install step before deployment, so we won't automatically run `up` for // such projects. Once we switch over to using `go run` for go, we can remove this and always run `up`. runUp := !strings.EqualFold(proj.Runtime.Name(), "go") if runUp { m, err := getUpdateMetadata("", root) if err != nil { return errors.Wrap(err, "gathering environment metadata") } _, err = stack.Update(commandContext(), backend.UpdateOperation{ Proj: proj, Root: root, M: m, Opts: opts, Scopes: cancellationScopes, }) switch { case err == context.Canceled: return errors.New("update cancelled") case err != nil: return PrintEngineError(err) default: return nil } } else { // If the current working directory changed, add instructions to cd into the directory. var deployMsg string if originalCwd != cwd { // If we can determine a relative path, use that, otherwise use the full path. var cd string if rel, err := filepath.Rel(originalCwd, cwd); err == nil { cd = rel } else { cd = cwd } // Surround the path with double quotes if it contains whitespace. if containsWhiteSpace(cd) { cd = fmt.Sprintf("\"%s\"", cd) } cd = fmt.Sprintf("cd %s", cd) deployMsg = "To deploy it, '" + cd + "' and then run 'pulumi up'." deployMsg = colors.Highlight(deployMsg, cd, colors.BrightBlue+colors.Underline+colors.Bold) } else { deployMsg = "To deploy it, run 'pulumi up'." } // Colorize and print the next step deploy action. deployMsg = colors.Highlight(deployMsg, "pulumi up", colors.BrightBlue+colors.Underline+colors.Bold) fmt.Println(opts.Display.Color.Colorize(deployMsg)) } return nil } // chooseTemplate will prompt the user to choose amongst the available templates. 
func chooseTemplate(templates []workspace.Template, opts display.Options) (workspace.Template, error) { const chooseTemplateErr = "no template selected; please use `pulumi new` to choose one" if !cmdutil.Interactive() { return workspace.Template{}, errors.New(chooseTemplateErr) } // Customize the prompt a little bit (and disable color since it doesn't match our scheme). surveycore.DisableColor = true surveycore.QuestionIcon = "" surveycore.SelectFocusIcon = opts.Color.Colorize(colors.BrightGreen + ">" + colors.Reset) message := "\rPlease choose a template:" message = opts.Color.Colorize(colors.SpecPrompt + message + colors.Reset) options, optionToTemplateMap := templatesToOptionArrayAndMap(templates) var option string if err := survey.AskOne(&survey.Select{ Message: message, Options: options, PageSize: len(options), }, &option, nil); err != nil { return workspace.Template{}, errors.New(chooseTemplateErr) } return optionToTemplateMap[option], nil } // parseConfig parses the config values passed via command line flags. // These are passed as `-c aws:region=us-east-1 -c foo:bar=blah` and end up // in configArray as ["aws:region=us-east-1", "foo:bar=blah"]. // This function converts the array into a config.Map. func parseConfig(configArray []string) (config.Map, error) { configMap := make(config.Map) for _, c := range configArray { kvp := strings.SplitN(c, "=", 2) key, err := parseConfigKey(kvp[0]) if err != nil { return nil, err } value := config.NewValue("") if len(kvp) == 2 { value = config.NewValue(kvp[1]) } configMap[key] = value } return configMap, nil } // promptForConfig will go through each config key needed by the template and prompt for a value. // If a config value exists in commandLineConfig, it will be used without prompting. // If stackConfig is non-nil and a config value exists in stackConfig, it will be used as the default // value when prompting instead of the default value specified in templateConfig. func promptForConfig( stack backend.Stack, templateConfig map[string]workspace.ProjectTemplateConfigValue, commandLineConfig config.Map, stackConfig config.Map, yes bool, opts display.Options) (config.Map, error) { // Convert `string` keys to `config.Key`. If a string key is missing a delimiter, // the project name will be prepended. parsedTemplateConfig := make(map[config.Key]workspace.ProjectTemplateConfigValue) for k, v := range templateConfig { parsedKey, parseErr := parseConfigKey(k) if parseErr != nil { return nil, parseErr } parsedTemplateConfig[parsedKey] = v } // Sort keys. Note that we use the fully qualified module member here instead of a `prettyKey` so that // all config values for the current program are prompted one after another. var keys config.KeyArray for k := range parsedTemplateConfig { keys = append(keys, k) } sort.Sort(keys) var err error var crypter config.Crypter c := make(config.Map) for _, k := range keys { // If it was passed as a command line flag, use it without prompting. if val, ok := commandLineConfig[k]; ok { c[k] = val continue } templateConfigValue := parsedTemplateConfig[k] // Prepare a default value. var defaultValue string var secret bool if stackConfig != nil { // Use the stack's existing value as the default. if val, ok := stackConfig[k]; ok { secret = val.Secure() // Lazily get the crypter, only if needed, to avoid prompting for a password with the local backend. 
if secret && crypter == nil { if crypter, err = backend.GetStackCrypter(stack); err != nil { return nil, err } } // It's OK to pass a nil or non-nil crypter for non-secret values. value, err := val.Value(crypter) if err != nil { return nil, err } defaultValue = value } } if defaultValue == "" { defaultValue = templateConfigValue.Default } if !secret { secret = templateConfigValue.Secret } // Prepare the prompt. prompt := prettyKey(k) if templateConfigValue.Description != "" { prompt = prompt + ": " + templateConfigValue.Description } // Prompt. value, err := promptForValue(yes, prompt, defaultValue, secret, nil, opts) if err != nil { return nil, err } // Encrypt the value if needed. var v config.Value if secret { // Lazily get the crypter, only if needed, to avoid prompting for a password with the local backend. if crypter == nil { if crypter, err = backend.GetStackCrypter(stack); err != nil { return nil, err } } enc, err := crypter.EncryptValue(value) if err != nil { return nil, err } v = config.NewSecureValue(enc) } else { v = config.NewValue(value) } // Save it. c[k] = v } // Add any other config values from the command line. for k, v := range commandLineConfig { if _, ok := c[k]; !ok { c[k] = v } } return c, nil } // promptForValue prompts the user for a value with a defaultValue preselected. Hitting enter accepts the // default. If yes is true, defaultValue is returned without prompting. isValidFn is an optional parameter; // when specified, it will be run to validate that value entered. An invalid value will result in an error // message followed by another prompt for the value. func promptForValue( yes bool, prompt string, defaultValue string, secret bool, isValidFn func(value string) bool, opts display.Options) (string, error) { if yes { return defaultValue, nil } for { if defaultValue == "" { prompt = opts.Color.Colorize( fmt.Sprintf("%s%s:%s ", colors.BrightCyan, prompt, colors.Reset)) } else { defaultValuePrompt := defaultValue if secret { defaultValuePrompt = "[secret]" } prompt = opts.Color.Colorize( fmt.Sprintf("%s%s: (%s)%s ", colors.BrightCyan, prompt, defaultValuePrompt, colors.Reset)) } fmt.Print(prompt) // Read the value. var err error var value string if secret { value, err = cmdutil.ReadConsoleNoEcho("") if err != nil { return "", err } } else { value, err = cmdutil.ReadConsole("") if err != nil { return "", err } } value = strings.TrimSpace(value) if value != "" { if isValidFn == nil || isValidFn(value) { return value, nil } // The value is invalid, let the user know and try again fmt.Printf("Sorry, '%s' is not a valid %s.\n", value, prompt) continue } return defaultValue, nil } } // templatesToOptionArrayAndMap returns an array of option strings and a map of option strings to templates. // Each option string is made up of the template name and description with some padding in between. func templatesToOptionArrayAndMap(templates []workspace.Template) ([]string, map[string]workspace.Template) { // Find the longest name length. Used to add padding between the name and description. maxNameLength := 0 for _, template := range templates { if len(template.Name) > maxNameLength { maxNameLength = len(template.Name) } } // Build the array and map. var options []string nameToTemplateMap := make(map[string]workspace.Template) for _, template := range templates { // Create the option string that combines the name, padding, and description. 
desc := workspace.ValueOrDefaultProjectDescription("", template.ProjectDescription, template.Description) option := fmt.Sprintf(fmt.Sprintf("%%%ds %%s", -maxNameLength), template.Name, desc) // Add it to the array and map. options = append(options, option) nameToTemplateMap[option] = template } sort.Strings(options) return options, nameToTemplateMap } // containsWhiteSpace returns true if the string contains whitespace. func containsWhiteSpace(value string) bool { for _, c := range value { if unicode.IsSpace(c) { return true } } return false }
return nil, err
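// A standalone sketch (not part of new.go) of the `-c key=value` splitting that the
// parseConfig comment above describes; it uses a plain map[string]string instead of
// Pulumi's config.Map (and skips parseConfigKey) purely to stay self-contained.
package main

import (
	"fmt"
	"strings"
)

func main() {
	configArray := []string{"aws:region=us-east-1", "foo:bar=blah", "flagOnly"}
	parsed := make(map[string]string)
	for _, c := range configArray {
		// SplitN with a limit of 2 keeps any '=' characters inside the value intact.
		kvp := strings.SplitN(c, "=", 2)
		value := ""
		if len(kvp) == 2 {
			value = kvp[1]
		}
		parsed[kvp[0]] = value
	}
	fmt.Println(parsed)
}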
getService.ts
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** // *** Do not edit by hand unless you're certain you know what you are doing! *** import * as pulumi from "@pulumi/pulumi"; import { input as inputs, output as outputs } from "../types"; import * as utilities from "../utilities"; /** * Using this data source will open the DataWorks service automatically. If the service has already been opened, it will return `Opened`. * * For information about DataWorks and how to use it, see [What is DataWorks](https://www.alibabacloud.com/help/en/product/72772.htm). * * > **NOTE:** Available in v1.118.0+ * * ## Example Usage * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as alicloud from "@pulumi/alicloud"; * * const open = pulumi.output(alicloud.dataworks.getService({ * enable: "On", * }, { async: true })); * ``` */ export function
(args?: GetServiceArgs, opts?: pulumi.InvokeOptions): Promise<GetServiceResult> { args = args || {}; if (!opts) { opts = {} } if (!opts.version) { opts.version = utilities.getVersion(); } return pulumi.runtime.invoke("alicloud:dataworks/getService:getService", { "enable": args.enable, }, opts); } /** * A collection of arguments for invoking getService. */ export interface GetServiceArgs { /** * Set the value to `On` to enable the service. If the service has already been enabled, the current status is returned. Valid values: `On` or `Off`. Defaults to `Off`. */ readonly enable?: string; } /** * A collection of values returned by getService. */ export interface GetServiceResult { readonly enable?: string; /** * The provider-assigned unique ID for this managed resource. */ readonly id: string; /** * The current service enable status. */ readonly status: string; }
getService
leetcode62.rs
// https://leetcode-cn.com/problems/unique-paths/ // Runtime: 0 ms // Memory Usage: 2.1 MB pub fn unique_paths(m: i32, n: i32) -> i32 { let m = m as usize; let n = n as usize; let mut a = vec![vec![0; m + 1]; n + 1]; for i in 1..=n { for j in 1..=m { if i == 1 && j == 1 { a[i][j] = 1; } else { a[i][j] = a[i - 1][j] + a[i][j - 1]; } } } a[n][m] } // array dynamic_programming #[test] fn
() { assert_eq!(unique_paths(3, 2), 3); assert_eq!(unique_paths(7, 3), 28); }
test2_62
test_tenant_networks_client.py
# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.services.compute import tenant_networks_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class TestTenantNetworksClient(base.BaseServiceTest): FAKE_NETWORK = { "cidr": "None", "id": "c2329eb4-cc8e-4439-ac4c-932369309e36", "label": u'\u30d7' } FAKE_NETWORKS = [FAKE_NETWORK] NETWORK_ID = FAKE_NETWORK['id'] def setUp(self): super(TestTenantNetworksClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = tenant_networks_client.TenantNetworksClient( fake_auth, 'compute', 'regionOne') def _test_list_tenant_networks(self, bytes_body=False): self.check_service_client_function( self.client.list_tenant_networks, 'tempest.lib.common.rest_client.RestClient.get', {"networks": self.FAKE_NETWORKS}, bytes_body) def test_list_tenant_networks_with_str_body(self): self._test_list_tenant_networks() def test_list_tenant_networks_with_bytes_body(self): self._test_list_tenant_networks(bytes_body=True) def _test_show_tenant_network(self, bytes_body=False): self.check_service_client_function( self.client.show_tenant_network, 'tempest.lib.common.rest_client.RestClient.get', {"network": self.FAKE_NETWORK}, bytes_body, network_id=self.NETWORK_ID) def
(self): self._test_show_tenant_network() def test_show_tenant_network_with_bytes_body(self): self._test_show_tenant_network(bytes_body=True)
test_show_tenant_network_with_str_body
list_users.py
#!/usr/bin/env python3 """ List all users registered in <CWL_ICA_REPO_PATH>/config/user.yaml """ from classes.command import Command from utils.logging import get_logger import pandas as pd from utils.repo import read_yaml, get_user_yaml_path import sys logger = get_logger() class ListUsers(Command): """Usage: cwl-ica [options] list-users help cwl-ica [options] list-users Description: List all registered users in <CWL_ICA_REPO_PATH>/config/user.yaml Example: cwl-ica list-users """ def __init__(self, command_argv): # Collect args from doc strings super().__init__(command_argv) # Check args self.check_args() def __call__(self):
def check_args(self): """ Check the arguments; for list-users we only need to confirm that the user.yaml config file exists :return: """ # Just make sure the user.yaml path exists _ = get_user_yaml_path()
""" Just run through this :return: """ # Check project.yaml exists user_yaml_path = get_user_yaml_path() user_list = read_yaml(user_yaml_path)['users'] # Create pandas df of user yaml path user_df = pd.DataFrame(user_list) # Write user to stdout user_df.to_markdown(sys.stdout, index=False) # Add new line print()
E0059.rs
#![feature(unboxed_closures)]
fn main() { }
fn foo<F: Fn<i32>>(f: F) -> F::Output { f(3) } //~ ERROR E0059
DSModel.py
# @Author: Antoine Pointeau <kalif> # @Date: 2017-03-27T01:24:24+02:00 # @Email: [email protected] # @Filename: UCSModel.py # @Last modified by: kalif # @Last modified time: 2017-04-04T00:50:08+02:00 import os import yaml import copy from ..UCException import * from ..UCChain import * from .Interface import * class DSModel(Interface): def initDS(self, cnt): directory = os.path.dirname(self.ctx.path) self.file = os.path.join(directory, cnt['path']) with open(self.file, 'r') as fd: self.load = yaml.load(fd) if not self.load: self.load = {} self.ctx.path = self.file self.data = {} for prop in self.load: ctx = copy.copy(self.ctx) ctx.fieldName = prop self.data[prop] = self.conf.factory.create(self.conf, ctx, self.load[prop]) def has(self, chain): if not chain.current() in self.data: return False return True def get(self, chain): if not self.has(chain): raise UCException("property '%s' not found (chain: %s)" % (chain.current(), chain.trace())) elem = self.data[chain.current()] if isinstance(elem, DSModel): return elem.get(copy.copy(chain).next()) else: return elem.get() def set(self, chain, value):
def extract(self): res = {} for prop in self.data: res[prop] = self.data[prop].extract() return res
if not self.has(chain): raise UCException("property '%s' not found" % (chain.current())) elem = self.data[chain.current()] if isinstance(elem, DSModel): return elem.set(copy.copy(chain).next(), value) else: return elem.set(value)
replicationSubnetGroup.js
"use strict"; /* Generated from https://d3teyb21fexa9r.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json, version 1.13.0 */ Object.defineProperty(exports, "__esModule", { value: true }); const resource_1 = require("../resource");
class ReplicationSubnetGroup extends resource_1.ResourceBase { constructor(properties) { super('AWS::DMS::ReplicationSubnetGroup', properties); } } exports.default = ReplicationSubnetGroup;
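// A hedged usage sketch (not part of the generated file). The property names follow the
// AWS::DMS::ReplicationSubnetGroup CloudFormation resource specification
// (ReplicationSubnetGroupDescription and SubnetIds are the required ones); the subnet IDs
// below are made-up placeholders.
const ReplicationSubnetGroup = require("./replicationSubnetGroup").default;

const subnetGroup = new ReplicationSubnetGroup({
    ReplicationSubnetGroupDescription: "Subnets available to the DMS replication instance",
    SubnetIds: ["subnet-11111111", "subnet-22222222"]
});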
Register.js
import React, {useState} from 'react' import {Link} from "react-router-dom"; import axios from 'axios' import {showErrMsg, showSuccessMsg} from "../../../services/notification/Notification";
const initialState = { login: '', email: '', password: '', confirmPassword: '', fullName: '', err: '', success: '' } export default function Register(){ const [user, setUser] = useState(initialState) const {login, email, password, confirmPassword, fullName, err, success} = user const handleChangeInput = e => { const {name, value} = e.target setUser({...user, [name]:value, err: '', success: ''}) } const handleSubmit = async e => { e.preventDefault() if(!isEmail(email)) return setUser({...user, err: "Wrong email", success: ''}) if(!isMatch(password, confirmPassword)) return setUser({...user, err: "Password did not match.", success: ''}) try { const res = await axios.post('/api/auth/register', { login, email, password, fullName }) setUser({...user, err: '', success: res.data.msg}) } catch (err) { err.response.data.msg && setUser({...user, err: err.response.data.msg, success: ''}) } } return ( <div> <h2>Register</h2> {err && showErrMsg(err)} {success && showSuccessMsg(success)} <form onSubmit={handleSubmit}> <div> <label htmlFor={'login'}>Login</label> <input type={'text'} required={true} name={'login'} id={'login'} value={login} onChange={handleChangeInput}/> </div> <div> <label htmlFor={'email'}>Email</label> <input type={'email'} required={true} name={'email'} id={'email'} value={email} onChange={handleChangeInput}/> </div> <div> <label htmlFor={'password'}>Password</label> <input type={'password'} minLength={6} required={true} name={'password'} id={'password'} value={password} onChange={handleChangeInput}/> </div> <div> <label htmlFor={'confirmPassword'}>Confirm password</label> <input type={'password'} required={true} name={'confirmPassword'} id={'confirmPassword'} value={confirmPassword} onChange={handleChangeInput}/> </div> <div> <label htmlFor={'fullName'}>Full name</label> <input type={'text'} name={'fullName'} id={'fullName'} value={fullName} onChange={handleChangeInput}/> </div> <div> <button type={"submit"}>Register</button> </div> </form> <p>Already have an account? <Link to={'/login'}>Login</Link></p> </div> ) }
import {isMatch, isEmail} from '../../../services/validation/Validation'
test_decorators.py
import os import unittest from typing import Optional from django.http import HttpResponse from django.test import RequestFactory from request_limiter import request_limiter, LimitedIntervalStrategy, \ LimitStrategy, LimitException, django_request_limiter os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_settings') req_factory = RequestFactory() class MockStrategy(LimitStrategy): def __init__(self, allow: bool): self._allow = allow def
(self, key: Optional[str] = None) -> bool: return self._allow def get_remaining(self, key: Optional[str] = None) -> float: return 1 def clean(self): pass class TestRequestLimiterDecorator(unittest.TestCase): def test_when_strategy_not_given_uses_limited_interval_strategy(self): limiter = request_limiter() self.assertTrue(isinstance(limiter.strategy, LimitedIntervalStrategy)) def test_when_strategy_allows_invokes_function(self): @request_limiter(strategy=MockStrategy(allow=True)) def test_func() -> bool: return True self.assertTrue(test_func()) def test_when_strategy_denies_raises_exception(self): @request_limiter(strategy=MockStrategy(allow=False)) def test_func() -> bool: return True self.assertRaises(LimitException, test_func) class TestDjangoRequestLimiter(unittest.TestCase): def test_limits_based_on_ip(self): @django_request_limiter @request_limiter(strategy=LimitedIntervalStrategy(requests=1)) def test_view(request): return True res1 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.1')) assert res1, 'Expected first request to work' res2 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.1')) assert isinstance(res2, HttpResponse), 'Expected limit http response' assert res2.status_code == 429, 'Expected 429 response code' # change Ip res3 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.2')) assert res3, 'Expected different ip request to work'
allow
lib.rs
// Copyright 2020 MaidSafe.net limited. // // This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT // http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD // https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied, // modified, or distributed except according to those terms. Please review the Licences for the // specific language governing permissions and limitations relating to use of the SAFE Network // Software. use directories::BaseDirs; use log::debug; use regex::Regex; use std::{ fs, path::PathBuf, process::{Command, Stdio}, thread, time::Duration, }; use structopt::StructOpt; #[cfg(not(target_os = "windows"))] const SAFE_VAULT_EXECUTABLE: &str = "safe_vault"; #[cfg(target_os = "windows")] const SAFE_VAULT_EXECUTABLE: &str = "safe_vault.exe"; /// Tool to launch SAFE vaults to form a local single-section network /// /// Currently, this tool runs vaults on localhost (since that's the default if no IP address is given to the vaults) #[derive(StructOpt, Debug)] #[structopt(name = "safe-nlt")] struct CmdArgs { /// Verbosity level for this tool #[structopt(short = "v", long, parse(from_occurrences))] verbosity: u8, /// Path where to locate safe_vault/safe_vault.exe binary. The SAFE_VAULT_PATH env var can be also used to set the path #[structopt(short = "p", long, env = "SAFE_VAULT_PATH")] vault_path: Option<PathBuf>, /// Interval in seconds between launching each of the vaults #[structopt(short = "i", long, default_value = "1")] interval: u64, /// Path where the output directories for all the vaults are written #[structopt(short = "d", long, default_value = "./vaults")] vaults_dir: PathBuf, /// Number of vaults to spawn with the first one being the genesis. This number should be greater than 0. #[structopt(short = "n", long, default_value = "8")] num_vaults: u8, /// Verbosity level for vaults logs (default: INFO) #[structopt(short = "y", long, parse(from_occurrences))] vaults_verbosity: u8, } pub fn run() -> Result<(), String> { run_with(None) } pub fn run_with(cmd_args: Option<&[&str]>) -> Result<(), String> { // Let's first get all the arguments passed in, either as function's args, or CLI args let args = match cmd_args { None => CmdArgs::from_args(), Some(cmd_args) => CmdArgs::from_iter_safe(cmd_args).map_err(|err| err.to_string())?, }; let vault_bin_path = get_vault_bin_path(args.vault_path)?; let msg = format!( "Launching with vault executable from: {}", vault_bin_path.display() ); if args.verbosity > 0 { println!("{}", msg); } debug!("{}", msg); let msg = format!("Network size: {} vaults", args.num_vaults); if args.verbosity > 0 { println!("{}", msg); } debug!("{}", msg); let mut common_args: Vec<&str> = vec![]; // We need a minimum of INFO level for vaults verbosity, // since the genesis vault logs the contact info at INFO level let verbosity = format!("-{}", "v".repeat(2 + args.vaults_verbosity as usize)); common_args.push(&verbosity);
// Construct genesis vault's command arguments let genesis_vault_dir = &args.vaults_dir.join("safe-vault-genesis"); let genesis_vault_dir_str = genesis_vault_dir.display().to_string(); let genesis_vault_args = build_vault_args( common_args.clone(), &genesis_vault_dir_str, None, /* genesis */ ); // Let's launch genesis vault now let msg = "Launching genesis vault (#1)..."; if args.verbosity > 0 { println!("{}", msg); } debug!("{}", msg); run_vault_cmd(&vault_bin_path, &genesis_vault_args, args.verbosity)?; // Get port number of genesis vault to pass it as hard-coded contact to the other vaults let interval_duration = Duration::from_secs(args.interval); thread::sleep(interval_duration); let genesis_contact_info = grep_connection_info(&genesis_vault_dir.join("safe_vault.log"))?; let msg = format!("Genesis vault contact info: {}", genesis_contact_info); if args.verbosity > 0 { println!("{}", msg); } debug!("{}", msg); // We can now run the rest of the vaults for i in 2..args.num_vaults + 1 { // Construct current vault's command arguments let vault_dir = &args .vaults_dir .join(&format!("safe-vault-{}", i)) .display() .to_string(); let current_vault_args = build_vault_args(common_args.clone(), &vault_dir, Some(&genesis_contact_info)); let msg = format!("Launching vault #{}...", i); if args.verbosity > 0 { println!("{}", msg); } debug!("{}", msg); run_vault_cmd(&vault_bin_path, &current_vault_args, args.verbosity)?; // We wait for a few secs before launching each new vault thread::sleep(interval_duration); } println!("Done!"); Ok(()) } #[inline] fn get_vault_bin_path(vault_path: Option<PathBuf>) -> Result<PathBuf, String> { match vault_path { Some(p) => Ok(p), None => { let base_dirs = BaseDirs::new().ok_or_else(|| "Failed to obtain user's home path".to_string())?; let mut path = PathBuf::from(base_dirs.home_dir()); path.push(".safe"); path.push("vault"); path.push(SAFE_VAULT_EXECUTABLE); Ok(path) } } } fn build_vault_args<'a>( mut base_args: Vec<&'a str>, vault_dir: &'a str, contact_info: Option<&'a str>, ) -> Vec<&'a str> { if let Some(contact) = contact_info { base_args.push("--hard-coded-contacts"); base_args.push(contact); } else { base_args.push("--first"); } base_args.push("--root-dir"); base_args.push(vault_dir); base_args.push("--log-dir"); base_args.push(vault_dir); base_args } fn run_vault_cmd(vault_path: &PathBuf, args: &[&str], verbosity: u8) -> Result<(), String> { let path_str = vault_path.display().to_string(); let msg = format!("Running '{}' with args {:?} ...", path_str, args); if verbosity > 1 { println!("{}", msg); } debug!("{}", msg); let _child = Command::new(&path_str) .args(args) .stdout(Stdio::null()) .stderr(Stdio::null()) .spawn() .map_err(|err| { format!( "Failed to run '{}' with args '{:?}': {}", path_str, args, err ) })?; Ok(()) } fn grep_connection_info(log_path: &PathBuf) -> Result<String, String> { let regex_query = Regex::new(r".+Vault connection info:\s(.+)$").map_err(|err| { format!( "Failed to obtain the contact info of the genesis vault: {}", err ) })?; let file_content = fs::read_to_string(log_path).map_err(|err| { format!( "Failed to obtain the contact info of the genesis vault: {}", err ) })?; for (_, line) in file_content.lines().enumerate() { if let Some(contact_info) = &regex_query.captures(&line) { return Ok(format!("[{}]", contact_info[1].to_string())); } } Err("Failed to find the contact info of the genesis vault".to_string()) }
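// A minimal sketch (not part of the original file) of driving `run_with` programmatically
// instead of through the CLI. The flag values are arbitrary examples; note that structopt's
// `from_iter_safe` treats the first element as the binary name, so it must be supplied.
#[allow(dead_code)]
fn launch_small_test_network() -> Result<(), String> {
    let args: &[&str] = &[
        "safe-nlt",       // argv[0]: binary name expected by from_iter_safe
        "-n", "4",        // spawn 4 vaults, the first one being the genesis vault
        "-i", "2",        // wait 2 seconds between launching each vault
        "-d", "./vaults", // write every vault's root and log dirs under ./vaults
    ];
    run_with(Some(args))
}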
publisher.rs
/* * Copyright 2018 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ------------------------------------------------------------------------------ */ #![allow(unknown_lints)] use batch::Batch; use block::Block; use cpython::{NoArgs, ObjectProtocol, PyClone, PyDict, PyList, PyObject, Python}; use std::collections::{HashMap, HashSet, VecDeque}; use std::mem; use std::slice::Iter; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, SendError, Sender}; use std::sync::{Arc, RwLock}; use std::thread; use std::time::Duration; use execution::execution_platform::ExecutionPlatform; use ffi::py_import_class; use journal::block_manager::{BlockManager, BlockRef}; use journal::candidate_block::{CandidateBlock, CandidateBlockError}; use journal::chain_commit_state::TransactionCommitCache; use journal::chain_head_lock::ChainHeadLock; use metrics; use state::settings_view::SettingsView; use state::state_view_factory::StateViewFactory; const NUM_PUBLISH_COUNT_SAMPLES: usize = 5; const INITIAL_PUBLISH_COUNT: usize = 30; lazy_static! { static ref COLLECTOR: metrics::MetricsCollectorHandle = metrics::get_collector("sawtooth_validator.publisher"); } lazy_static! { static ref PY_BLOCK_HEADER_CLASS: PyObject = py_import_class("sawtooth_validator.protobuf.block_pb2", "BlockHeader"); static ref PY_BLOCK_BUILDER_CLASS: PyObject = py_import_class("sawtooth_validator.journal.block_builder", "BlockBuilder"); } #[derive(Debug)] pub enum InitializeBlockError { BlockInProgress, MissingPredecessor, } #[derive(Debug)] pub enum CancelBlockError { BlockNotInitialized, } #[derive(Debug)] pub enum FinalizeBlockError { BlockNotInitialized, BlockEmpty, } #[derive(Debug)] pub enum StartError { Disconnected, } #[derive(Debug)] pub enum BlockPublisherError { UnknownBlock(String), } pub trait BatchObserver: Send + Sync { fn notify_batch_pending(&self, batch: &Batch); } pub struct BlockPublisherState { pub transaction_executor: Box<ExecutionPlatform>, pub batch_observers: Vec<Box<BatchObserver>>, pub chain_head: Option<Block>, pub candidate_block: Option<CandidateBlock>, pub pending_batches: PendingBatchesPool, block_references: HashMap<String, BlockRef>, } impl BlockPublisherState { pub fn new( transaction_executor: Box<ExecutionPlatform>, batch_observers: Vec<Box<BatchObserver>>, chain_head: Option<Block>, candidate_block: Option<CandidateBlock>, pending_batches: PendingBatchesPool, ) -> Self { BlockPublisherState { batch_observers, transaction_executor, chain_head, candidate_block, pending_batches, block_references: HashMap::new(), } } pub fn get_previous_block_id(&self) -> Option<String> { let candidate_block = self.candidate_block.as_ref(); candidate_block.map(|cb| cb.previous_block_id()) } } pub struct SyncBlockPublisher { pub state: Arc<RwLock<BlockPublisherState>>, block_manager: BlockManager, batch_injector_factory: PyObject, batch_committed: PyObject, transaction_committed: PyObject, state_view_factory: StateViewFactory, block_sender: PyObject, batch_publisher: 
PyObject, identity_signer: PyObject, data_dir: PyObject, config_dir: PyObject, permission_verifier: PyObject, exit: Arc<Exit>, } impl Clone for SyncBlockPublisher { fn clone(&self) -> Self { let state = Arc::clone(&self.state); let gil = Python::acquire_gil(); let py = gil.python(); SyncBlockPublisher { state, block_manager: self.block_manager.clone(), batch_injector_factory: self.batch_injector_factory.clone_ref(py), batch_committed: self.batch_committed.clone_ref(py), transaction_committed: self.transaction_committed.clone_ref(py), state_view_factory: self.state_view_factory.clone(), block_sender: self.block_sender.clone_ref(py), batch_publisher: self.batch_publisher.clone_ref(py), identity_signer: self.identity_signer.clone_ref(py), data_dir: self.data_dir.clone_ref(py), config_dir: self.config_dir.clone_ref(py), permission_verifier: self.permission_verifier.clone_ref(py), exit: Arc::clone(&self.exit), } } } impl SyncBlockPublisher { pub fn on_chain_updated( &self, state: &mut BlockPublisherState, chain_head: Block, committed_batches: Vec<Batch>, uncommitted_batches: Vec<Batch>, ) { info!("Now building on top of block, {}", chain_head); let batches_len = chain_head.batches.len(); state.chain_head = Some(chain_head); let mut previous_block_option = None; if let (true, Some(previous_block)) = self.is_building_block(state) { previous_block_option = Some(previous_block); self.cancel_block(state, false); } state.pending_batches.update_limit(batches_len); state .pending_batches .rebuild(Some(committed_batches), Some(uncommitted_batches)); if let Some(previous_block) = previous_block_option { if let Err(err) = self.initialize_block(state, &previous_block, false) { error!("Unable to initialize block after canceling: {:?}", err); } } } pub fn on_chain_updated_internal( &mut self, chain_head: Block, committed_batches: Vec<Batch>, uncommitted_batches: Vec<Batch>, ) { let mut state = self .state .write() .expect("RwLock was poisoned during a write lock"); self.on_chain_updated( &mut state, chain_head, committed_batches, uncommitted_batches, ); } fn load_injectors(&self, py: Python, state_root: &str) -> Vec<PyObject> { self.batch_injector_factory .call_method(py, "create_injectors", (state_root,), None) .expect("BatchInjectorFactory has no method 'create_injectors'") .extract::<PyList>(py) .unwrap() .iter(py) .collect() } fn initialize_block( &self, state: &mut BlockPublisherState, previous_block: &Block, ref_block: bool, ) -> Result<(), InitializeBlockError> { if state.candidate_block.is_some() { warn!("Tried to initialize block but block already initialized"); return Err(InitializeBlockError::BlockInProgress); } if ref_block {
match self .block_manager .ref_block(&previous_block.header_signature) { Ok(block_ref) => { state .block_references .insert(block_ref.block_id().to_owned(), block_ref); } Err(err) => { error!("Unable to ref block! {}: {:?}", &previous_block, err); return Err(InitializeBlockError::MissingPredecessor); } } } let mut candidate_block = { let settings_view: SettingsView = self .state_view_factory .create_view(&previous_block.state_root_hash) .expect("Failed to get state view for previous block"); let max_batches = settings_view .get_setting_u32("sawtooth.publisher.max_batches_per_block", Some(0u32)) .expect("Unable to get value from settings view") .expect("Failed to return expected default") as usize; let gil = Python::acquire_gil(); let py = gil.python(); let public_key = self.get_public_key(py); let batch_injectors = self.load_injectors(py, &previous_block.state_root_hash); let kwargs = PyDict::new(py); kwargs .set_item(py, "block_num", previous_block.block_num + 1) .unwrap(); kwargs .set_item(py, "previous_block_id", &previous_block.header_signature) .unwrap(); kwargs .set_item(py, "signer_public_key", &public_key) .unwrap(); let block_header = PY_BLOCK_HEADER_CLASS .call(py, NoArgs, Some(&kwargs)) .expect("BlockHeader could not be constructed"); let block_builder = PY_BLOCK_BUILDER_CLASS .call(py, (block_header,), None) .expect("BlockBuilder could not be constructed"); let scheduler = state .transaction_executor .create_scheduler(&previous_block.state_root_hash) .expect("Failed to create new scheduler"); let committed_txn_cache = TransactionCommitCache::new(self.transaction_committed.clone_ref(py)); CandidateBlock::new( previous_block.clone(), self.batch_committed.clone_ref(py), self.transaction_committed.clone_ref(py), scheduler, committed_txn_cache, block_builder, max_batches, batch_injectors, self.identity_signer.clone_ref(py), settings_view, ) }; for batch in state.pending_batches.iter() { if candidate_block.can_add_batch() { candidate_block.add_batch(batch.clone()); } else { break; } } state.candidate_block = Some(candidate_block); Ok(()) } fn finalize_block( &self, state: &mut BlockPublisherState, consensus_data: &[u8], force: bool, ) -> Result<String, FinalizeBlockError> { let mut option_result = None; if let Some(ref mut candidate_block) = &mut state.candidate_block { option_result = Some(candidate_block.finalize(consensus_data, force)); } let res = match option_result { Some(result) => match result { Ok(finalize_result) => { state.pending_batches.update( finalize_result.remaining_batches.clone(), &finalize_result.last_batch, ); let previous_block_id = &state .candidate_block .as_ref() .expect("Failed to get candidate block, even though it is being published!") .previous_block_id(); state.candidate_block = None; match finalize_result.block { Some(block) => { // Drop Ref-D: We have finished creating this block and are about to // send it to the completer, so we can drop the ext. ref. to its // predecessor. 
if state.block_references.remove(previous_block_id).is_none() { error!( "Reference not found for finalized block {}", previous_block_id ); } Some(Ok( self.publish_block(&block, finalize_result.injected_batch_ids) )) } None => None, } } Err(CandidateBlockError::BlockEmpty) => Some(Err(FinalizeBlockError::BlockEmpty)), }, None => Some(Err(FinalizeBlockError::BlockNotInitialized)), }; if let Some(val) = res { val } else { self.restart_block(state); Err(FinalizeBlockError::BlockEmpty) } } fn get_block(&self, block_id: &str) -> Result<Block, BlockPublisherError> { self.block_manager .get(&[block_id]) .next() .expect("Did not return any Results, even not found blocks") .ok_or_else(|| BlockPublisherError::UnknownBlock(block_id.to_string())) } fn restart_block(&self, state: &mut BlockPublisherState) { if let Some(previous_block) = state.candidate_block.as_ref().map(|candidate| { self.get_block(&candidate.previous_block_id()) .expect("Failed to get previous block, but we are building on it.") }) { self.cancel_block(state, false); if let Err(err) = self.initialize_block(state, &previous_block, false) { error!("Initialization failed unexpectedly: {:?}", err); } } } fn summarize_block( &self, state: &mut BlockPublisherState, force: bool, ) -> Result<Vec<u8>, FinalizeBlockError> { let result = match state.candidate_block { None => Some(Err(FinalizeBlockError::BlockNotInitialized)), Some(ref mut candidate_block) => match candidate_block.summarize(force) { Ok(summary) => { if let Some(s) = summary { Some(Ok(s)) } else { None } } Err(CandidateBlockError::BlockEmpty) => Some(Err(FinalizeBlockError::BlockEmpty)), }, }; if let Some(res) = result { res } else { self.restart_block(state); Err(FinalizeBlockError::BlockEmpty) } } fn publish_block(&self, block: &PyObject, injected_batches: Vec<String>) -> String { let gil = Python::acquire_gil(); let py = gil.python(); let block: Block = block .extract(py) .expect("Got block to publish that wasn't a BlockWrapper"); let block_id = block.header_signature.clone(); self.block_sender .call_method(py, "send", (block, injected_batches), None) .map_err(|py_err| { ::pylogger::exception(py, "{:?}", py_err); }) .expect("BlockSender.send() raised an exception"); let mut blocks_published_count = COLLECTOR.counter("BlockPublisher.blocks_published_count", None, None); blocks_published_count.inc(); block_id } fn get_public_key(&self, py: Python) -> String { self.identity_signer .call_method(py, "get_public_key", NoArgs, None) .expect("IdentitySigner has no method get_public_key") .call_method(py, "as_hex", NoArgs, None) .expect("PublicKey object as no method as_hex") .extract::<String>(py) .unwrap() } fn is_building_block(&self, state: &BlockPublisherState) -> (bool, Option<Block>) { if let Some(ref candidate_block) = state.candidate_block { let previous = self .get_block(&candidate_block.previous_block_id()) .expect("Failed to get block being built on"); (true, Some(previous)) } else { (false, None) } } pub fn on_batch_received(&self, batch: Batch) { let mut state = self.state.write().expect("Lock should not be poisoned"); for observer in &state.batch_observers { observer.notify_batch_pending(&batch); } let permission_check = { let gil = Python::acquire_gil(); let py = gil.python(); self.permission_verifier .call_method(py, "is_batch_signer_authorized", (batch.clone(),), None) .expect("PermissionVerifier has no method is_batch_signer_authorized") .extract(py) .expect("PermissionVerifier.is_batch_signer_authorized did not return bool") }; if permission_check { 
state.pending_batches.append(batch.clone()); if let Some(ref mut candidate_block) = state.candidate_block { if candidate_block.can_add_batch() { candidate_block.add_batch(batch); } } } } fn cancel_block(&self, state: &mut BlockPublisherState, unref_block: bool) { let mut candidate_block = None; mem::swap(&mut state.candidate_block, &mut candidate_block); if let Some(mut candidate_block) = candidate_block { if unref_block { // Drop Ref-D: We cancelled the block, so we can drop the ext. ref. to its predecessor. if state .block_references .remove(&candidate_block.previous_block_id()) .is_none() { error!( "Reference not found for canceled block {}", &candidate_block.previous_block_id() ); } } candidate_block.cancel(); } } } #[derive(Clone)] pub struct BlockPublisher { pub publisher: SyncBlockPublisher, } impl BlockPublisher { #![allow(too_many_arguments)] pub fn new( block_manager: BlockManager, transaction_executor: Box<ExecutionPlatform>, batch_committed: PyObject, transaction_committed: PyObject, state_view_factory: StateViewFactory, block_sender: PyObject, batch_publisher: PyObject, chain_head: Option<Block>, identity_signer: PyObject, data_dir: PyObject, config_dir: PyObject, permission_verifier: PyObject, batch_observers: Vec<Box<BatchObserver>>, batch_injector_factory: PyObject, ) -> Self { let state = Arc::new(RwLock::new(BlockPublisherState::new( transaction_executor, batch_observers, chain_head, None, PendingBatchesPool::new(NUM_PUBLISH_COUNT_SAMPLES, INITIAL_PUBLISH_COUNT), ))); let publisher = SyncBlockPublisher { state, block_manager, batch_committed, transaction_committed, state_view_factory, block_sender, batch_publisher, identity_signer, data_dir, config_dir, permission_verifier, batch_injector_factory, exit: Arc::new(Exit::new()), }; BlockPublisher { publisher } } pub fn start(&mut self) -> IncomingBatchSender { let (batch_tx, mut batch_rx) = make_batch_queue(); let builder = thread::Builder::new().name("PublisherThread".into()); let block_publisher = self.publisher.clone(); builder .spawn(move || { loop { // Receive and process a batch match batch_rx.get(Duration::from_millis(100)) { Err(err) => match err { BatchQueueError::Timeout => { if block_publisher.exit.get() { break; } } err => panic!("Unhandled error: {:?}", err), }, Ok(batch) => { block_publisher.on_batch_received(batch); } } } warn!("PublisherThread exiting"); }) .unwrap(); batch_tx } pub fn cancel_block(&self) -> Result<(), CancelBlockError> { let mut state = self.publisher.state.write().expect("RwLock was poisoned"); if state.candidate_block.is_some() { self.publisher.cancel_block(&mut state, true); Ok(()) } else { Err(CancelBlockError::BlockNotInitialized) } } pub fn stop(&self) { self.publisher.exit.set(); } pub fn chain_head_lock(&self) -> ChainHeadLock { ChainHeadLock::new(self.publisher.clone()) } pub fn initialize_block(&self, previous_block: &Block) -> Result<(), InitializeBlockError> { let mut state = self.publisher.state.write().expect("RwLock was poisoned"); self.publisher .initialize_block(&mut state, previous_block, true) } pub fn finalize_block( &self, consensus_data: &[u8], force: bool, ) -> Result<String, FinalizeBlockError> { let mut state = self.publisher.state.write().expect("RwLock is poisoned"); self.publisher .finalize_block(&mut state, consensus_data, force) } pub fn summarize_block(&self, force: bool) -> Result<Vec<u8>, FinalizeBlockError> { let mut state = self.publisher.state.write().expect("RwLock is poisoned"); self.publisher.summarize_block(&mut state, force) } pub fn 
pending_batch_info(&self) -> (i32, i32) { let state = self .publisher .state .read() .expect("RwLock was poisoned during a write lock"); ( state.pending_batches.len() as i32, state.pending_batches.limit() as i32, ) } pub fn has_batch(&self, batch_id: &str) -> bool { let state = self .publisher .state .read() .expect("RwLock was poisoned during a write lock"); state.pending_batches.contains(batch_id) } } /// This queue keeps track of the batch ids so that components on the edge /// can filter out duplicates early. However, there is still an opportunity for /// duplicates to make it into this queue, which is intentional to avoid /// blocking threads trying to put/get from the queue. Any duplicates /// introduced by this must be filtered out later. pub fn make_batch_queue() -> (IncomingBatchSender, IncomingBatchReceiver) { let (sender, reciever) = channel(); let ids = Arc::new(RwLock::new(HashSet::new())); ( IncomingBatchSender::new(ids.clone(), sender), IncomingBatchReceiver::new(ids, reciever), ) } pub struct IncomingBatchReceiver { ids: Arc<RwLock<HashSet<String>>>, receiver: Receiver<Batch>, } impl IncomingBatchReceiver { pub fn new( ids: Arc<RwLock<HashSet<String>>>, receiver: Receiver<Batch>, ) -> IncomingBatchReceiver { IncomingBatchReceiver { ids, receiver } } pub fn get(&mut self, timeout: Duration) -> Result<Batch, BatchQueueError> { let batch = self.receiver.recv_timeout(timeout)?; self.ids .write() .expect("RwLock was poisoned during a write lock") .remove(&batch.header_signature); Ok(batch) } } #[derive(Clone)] pub struct IncomingBatchSender { ids: Arc<RwLock<HashSet<String>>>, sender: Sender<Batch>, } impl IncomingBatchSender { pub fn new(ids: Arc<RwLock<HashSet<String>>>, sender: Sender<Batch>) -> IncomingBatchSender { IncomingBatchSender { ids, sender } } pub fn put(&mut self, batch: Batch) -> Result<(), BatchQueueError> { let mut ids = self .ids .write() .expect("RwLock was poisoned during a write lock"); if !ids.contains(&batch.header_signature) { ids.insert(batch.header_signature.clone()); self.sender.send(batch).map_err(BatchQueueError::from) } else { Ok(()) } } pub fn has_batch(&self, batch_id: &str) -> Result<bool, BatchQueueError> { Ok(self .ids .read() .expect("RwLock was poisoned during a write lock") .contains(batch_id)) } } #[derive(Debug)] pub enum BatchQueueError { SenderError(SendError<Batch>), Timeout, MutexPoisonError(String), } impl From<SendError<Batch>> for BatchQueueError { fn from(e: SendError<Batch>) -> Self { BatchQueueError::SenderError(e) } } impl From<RecvTimeoutError> for BatchQueueError { fn from(_: RecvTimeoutError) -> Self { BatchQueueError::Timeout } } /// Ordered batches waiting to be processed pub struct PendingBatchesPool { batches: Vec<Batch>, ids: HashSet<String>, limit: QueueLimit, gauge: metrics::Gauge, } impl PendingBatchesPool { pub fn new(sample_size: usize, initial_value: usize) -> PendingBatchesPool { PendingBatchesPool { batches: Vec::new(), ids: HashSet::new(), limit: QueueLimit::new(sample_size, initial_value), gauge: COLLECTOR.gauge("BlockPublisher.pending_batch_gauge", None, None), } } pub fn len(&self) -> usize { self.batches.len() } pub fn is_empty(&self) -> bool { self.len() == 0 } pub fn iter(&self) -> Iter<Batch> { self.batches.iter() } fn contains(&self, id: &str) -> bool { self.ids.contains(id) } fn reset(&mut self) { self.batches = Vec::new(); self.ids = HashSet::new(); } pub fn append(&mut self, batch: Batch) { if !self.contains(&batch.header_signature) { self.ids.insert(batch.header_signature.clone()); 
self.batches.push(batch); } } /// Recomputes the list of pending batches /// /// Args: /// committed (List<Batches>): Batches committed in the current chain /// since the root of the fork switching from. /// uncommitted (List<Batches): Batches that were committed in the old /// fork since the common root. pub fn rebuild(&mut self, committed: Option<Vec<Batch>>, uncommitted: Option<Vec<Batch>>) { let committed_set = if let Some(committed) = committed { committed .iter() .map(|i| i.header_signature.clone()) .collect::<HashSet<String>>() } else { HashSet::new() }; let previous_batches = self.batches.clone(); self.reset(); // Uncommitted and pending are disjoint sets since batches can only be // committed to a chain once. if let Some(batch_list) = uncommitted { for batch in batch_list { if !committed_set.contains(&batch.header_signature) { self.append(batch); } } } for batch in previous_batches { if !committed_set.contains(&batch.header_signature) { self.append(batch); } } self.gauge.set_value(self.batches.len()); } pub fn update(&mut self, mut still_pending: Vec<Batch>, last_sent: &Batch) { let last_index = self .batches .iter() .position(|i| i.header_signature == last_sent.header_signature); let unsent = if let Some(idx) = last_index { let mut unsent = vec![]; mem::swap(&mut unsent, &mut self.batches); still_pending.extend_from_slice(unsent.split_off(idx + 1).as_slice()); still_pending } else { let mut unsent = vec![]; mem::swap(&mut unsent, &mut self.batches); unsent }; self.reset(); for batch in unsent { self.append(batch); } self.gauge.set_value(self.batches.len()); } pub fn update_limit(&mut self, consumed: usize) { self.limit.update(self.batches.len(), consumed); } pub fn limit(&self) -> usize { self.limit.get() } } struct RollingAverage { samples: VecDeque<usize>, current_average: usize, } impl RollingAverage { pub fn new(sample_size: usize, initial_value: usize) -> RollingAverage { let mut samples = VecDeque::with_capacity(sample_size); samples.push_back(initial_value); RollingAverage { samples, current_average: initial_value, } } pub fn value(&self) -> usize { self.current_average } /// Add the sample and return the updated average. pub fn update(&mut self, sample: usize) -> usize { self.samples.push_back(sample); self.current_average = self.samples.iter().sum::<usize>() / self.samples.len(); self.current_average } } struct QueueLimit { avg: RollingAverage, } const QUEUE_MULTIPLIER: usize = 10; impl QueueLimit { pub fn new(sample_size: usize, initial_value: usize) -> QueueLimit { QueueLimit { avg: RollingAverage::new(sample_size, initial_value), } } /// Use the current queue size and the number of items consumed to /// update the queue limit, if there was a significant enough change. /// Args: /// queue_length (int): the current size of the queue /// consumed (int): the number items consumed pub fn update(&mut self, queue_length: usize, consumed: usize) { if consumed > 0 { // Only update the average if either: // a. Not drained below the current average // b. Drained the queue, but the queue was not bigger than the // current running average let remainder = queue_length.checked_sub(consumed).unwrap_or(0); if remainder > self.avg.value() || consumed > self.avg.value() { self.avg.update(consumed); } } } pub fn get(&self) -> usize { // Limit the number of items to QUEUE_MULTIPLIER times the publishing // average. This allows the queue to grow geometrically, if the queue // is drained. 
QUEUE_MULTIPLIER * self.avg.value() } } /// Utility class for signaling that a background thread should shutdown #[derive(Default)] pub struct Exit { flag: AtomicBool, } impl Exit { pub fn new() -> Self { Exit { flag: AtomicBool::new(false), } } pub fn get(&self) -> bool { self.flag.load(Ordering::Relaxed) } pub fn set(&self) { self.flag.store(true, Ordering::Relaxed); } }
// Create Ref-D: Hold the predecessor until we are done building the new block. This ext. // ref. must be dropped either 1) after the block is finalized but before sending the block // to the completer or 2) after the block is cancelled.
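// Hedged illustration (added; not part of the original file): a small check of
// the rolling-average arithmetic behind QueueLimit. The window is seeded with
// INITIAL_PUBLISH_COUNT (30); after one update that consumes 10 batches from a
// queue of 50, the average becomes (30 + 10) / 2 = 20, so the limit becomes
// QUEUE_MULTIPLIER * 20 = 200.
#[cfg(test)]
mod queue_limit_sketch {
    use super::*;

    #[test]
    fn limit_tracks_rolling_average() {
        let mut limit = QueueLimit::new(NUM_PUBLISH_COUNT_SAMPLES, INITIAL_PUBLISH_COUNT);
        // Seeded average is 30, so the initial limit is 10 * 30 = 300.
        assert_eq!(limit.get(), QUEUE_MULTIPLIER * INITIAL_PUBLISH_COUNT);
        // Queue of 50 with 10 consumed: the remainder (40) exceeds the current
        // average (30), so the sample is recorded and the average drops to 20.
        limit.update(50, 10);
        assert_eq!(limit.get(), QUEUE_MULTIPLIER * 20);
    }
}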
main.go
package main import ( "encoding/json" "fmt" "io/ioutil" "net/http" "os" "github.com/codegangsta/cli" ) func main()
// download a list of items that match a tag func snarf(tag, clientID, url string) error { if url == "" { url = fmt.Sprintf("https://api.instagram.com/v1/tags/%s/media/recent?count=500&client_id=%s", tag, clientID) } fmt.Println("GET ", url) resp, err := http.Get(url) if err != nil { fmt.Println("error fetching list from instagram: ", err) return err } body, err := ioutil.ReadAll(resp.Body) defer resp.Body.Close() if err != nil { fmt.Println("error reading the response from instagram:", err) return err } s := struct { Assets []Asset `json:"data"` Pagination map[string]string `json:"pagination"` }{} if err := json.Unmarshal(body, &s); err != nil { fmt.Println("error unmarshaling response json:", err) fmt.Printf("%s", string(body)) return err } // prep the output directory path := fmt.Sprintf("./files/%s/", tag) if err := os.MkdirAll(path, 0755); err != nil { fmt.Println("error: could not create output directory:", err) return err } work := make(chan Asset, len(s.Assets)) done := make(chan error, 5) // launch five downloaders for i := 0; i < 5; i++ { go download(work, done, path) } // send items to workers for _, a := range s.Assets { work <- a } close(work) for i := 0; i < 5; i++ { err := <-done if err != nil { // handle } } // do it all over again if there's more! if s.Pagination["next_url"] != "" { snarf(tag, clientID, s.Pagination["next_url"]) } return nil } // trigger a download of an asset func download(assets <-chan Asset, done chan<- error, out string) error { for a := range assets { path := fmt.Sprintf("%s/%s-%s.jpg", out, a.User.Username, a.ID) imgUrl := a.Images["standard_resolution"].URL fmt.Println("downloading", imgUrl) resp, err := http.Get(imgUrl) if err != nil { fmt.Println("error: fetch img failed:", err) return err } body, err := ioutil.ReadAll(resp.Body) defer resp.Body.Close() if err != nil { fmt.Println("error: read response failed:", err) return err } if err := ioutil.WriteFile(path, body, 0644); err != nil { fmt.Println("error: could not write file:", err) return err } } done <- nil return nil }
{ app := cli.NewApp() app.Name = "igtagsnarf" app.Usage = "download assets from instagram that match a tag" app.Flags = []cli.Flag{ cli.StringFlag{ Name: "client", Value: "", Usage: "instagram client ID", }, cli.StringFlag{ Name: "tag", Value: "", Usage: "tag to search by", }, } app.Action = func(c *cli.Context) { tag, clientID := c.String("tag"), c.String("client") snarf(tag, clientID, "") } app.Run(os.Args) }
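// Hedged usage sketch (added for illustration; not part of the original file).
// Besides the CLI wiring above (e.g. `igtagsnarf --client <id> --tag sunsets`),
// snarf can be called directly; passing an empty url makes it build the
// first-page endpoint itself. The tag and client ID here are placeholder
// assumptions.
func exampleDirectSnarf() error {
	return snarf("sunsets", "YOUR_CLIENT_ID", "")
}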
gather.rs
//! These benchmarks compare the custom built `GatheringReader`, to a chained //! cursor approach from `std`, for reading a typical scattered vector of byte //! buffers. #![warn(rust_2018_idioms)] #![feature(test)] extern crate test; // Still required, see rust-lang/rust#55133 use std::io; use std::io::{Cursor, Read}; use bytes::{BufMut, Bytes, BytesMut}; use test::Bencher; use olio::io::GatheringReader; const CHUNK_SIZE: usize = 8 * 1024; const CHUNK_COUNT: usize = 40; const READ_BUFF_SIZE: usize = 101; #[bench] fn gather_reader(b: &mut Bencher) { let buffers = create_buffers(); b.iter(move || { let len = read_gathered(&buffers).expect("read"); assert_eq!(CHUNK_SIZE * CHUNK_COUNT, len); }) } #[bench] fn gather_x_chained_cursors(b: &mut Bencher) { let buffers = create_buffers(); b.iter(move || { let len = read_chained(&buffers).expect("read"); assert_eq!(CHUNK_SIZE * CHUNK_COUNT, len); }) } #[bench] fn gather_upfront(b: &mut Bencher) { let buffers = { let mut bufs = Vec::with_capacity(CHUNK_COUNT); for b in create_buffers() { bufs.push(b) } bufs }; b.iter(|| { let buffers = buffers.clone(); // shallow let buf = gather(buffers); let cur = Cursor::new(&buf); let len = read_to_end(cur).expect("read"); assert_eq!(CHUNK_SIZE * CHUNK_COUNT, len); }) } #[bench] fn gather_upfront_read_only(b: &mut Bencher) { let buf = { let mut buffers = Vec::with_capacity(CHUNK_COUNT); for b in create_buffers() { buffers.push(b) } gather(buffers) }; b.iter(|| { let cur = Cursor::new(&buf); let len = read_to_end(cur).expect("read"); assert_eq!(CHUNK_SIZE * CHUNK_COUNT, len); }) } fn create_buffers() -> Vec<Bytes>
fn gather(buffers: Vec<Bytes>) -> Bytes { let mut newb = BytesMut::with_capacity(CHUNK_SIZE * CHUNK_COUNT); for b in buffers { newb.put_slice(&b); drop::<Bytes>(b); // Ensure ASAP drop } newb.freeze() } fn read_gathered(buffers: &[Bytes]) -> Result<usize, io::Error> { let r = GatheringReader::new(buffers); read_to_end(r) } fn read_chained(buffers: &[Bytes]) -> Result<usize, io::Error> { let mut r: Box<dyn Read> = Box::new(Cursor::new(&buffers[0])); for b in &buffers[1..] { r = Box::new(r.chain(Cursor::new(b))); } read_to_end(r) } fn read_to_end<R: Read>(mut r: R) -> Result<usize, io::Error> { let mut buf = [0u8; READ_BUFF_SIZE]; let mut total = 0; loop { let len = r.read(&mut buf)?; if len == 0 { break; } total += len; } Ok(total) }
{ let chunk: Bytes = vec![65u8; CHUNK_SIZE].into(); let mut v = Vec::new(); for _ in 0..CHUNK_COUNT { v.push(chunk.clone()); } v }
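// Hedged note (added; not part of the original benchmarks): these `#[bench]`
// functions sit behind `#![feature(test)]`, so they require a nightly
// toolchain. Assuming the file is registered as the `gather` bench target,
// a typical run is:
//
//     cargo +nightly bench --bench gather
//
// Every iteration reads CHUNK_COUNT * CHUNK_SIZE = 40 * 8 KiB = 320 KiB through
// READ_BUFF_SIZE (101-byte) reads, which is what each assert_eq! verifies.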
lb_https_monitor.go
/* * NSX API * * VMware NSX REST API * * API version: 1.0.0 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) */ package loadbalancer import ( "github.com/vmware/go-vmware-nsxt/common" ) type LbHttpsMonitor struct { // The server will populate this field when returing the resource. Ignored on PUT and POST. Links []common.ResourceLink `json:"_links,omitempty"` // Schema for this resource Schema string `json:"_schema,omitempty"` // Link to this resource Self *common.SelfResourceLink `json:"_self,omitempty"` // The _revision property describes the current revision of the resource. To prevent clients from overwriting each other's changes, PUT operations must include the current _revision of the resource, which clients should obtain by issuing a GET operation. If the _revision provided in a PUT request is missing or stale, the operation will be rejected. Revision int32 `json:"_revision"` // Timestamp of resource creation CreateTime int64 `json:"_create_time,omitempty"` // ID of the user who created this resource CreateUser string `json:"_create_user,omitempty"` // Timestamp of last modification LastModifiedTime int64 `json:"_last_modified_time,omitempty"` // ID of the user who last modified this resource LastModifiedUser string `json:"_last_modified_user,omitempty"` // Protection status is one of the following: PROTECTED - the client who retrieved the entity is not allowed to modify it. NOT_PROTECTED - the client who retrieved the entity is allowed to modify it REQUIRE_OVERRIDE - the client who retrieved the entity is a super user and can modify it, but only when providing the request header X-Allow-Overwrite=true. UNKNOWN - the _protection field could not be determined for this entity. Protection string `json:"_protection,omitempty"` // Indicates system owned resource SystemOwned bool `json:"_system_owned,omitempty"` // Description of this resource Description string `json:"description,omitempty"` // Defaults to ID if not set DisplayName string `json:"display_name,omitempty"` // Unique identifier of this resource Id string `json:"id,omitempty"` // Load balancers monitor the health of backend servers to ensure traffic is not black holed. There are two types of healthchecks: active and passive. Passive healthchecks depend on failures in actual client traffic (e.g. RST from server in response to a client connection) to detect that the server or the application is down. In case of active healthchecks, load balancer itself initiates new connections (or sends ICMP ping) to the servers periodically to check their health, completely independent of any data traffic. Currently, active health monitors are supported for HTTP, HTTPS, TCP, UDP and ICMP protocols. ResourceType string `json:"resource_type"` // Opaque identifiers meaningful to the API user Tags []common.Tag `json:"tags,omitempty"` // num of consecutive checks must fail before marking it down FallCount int64 `json:"fall_count,omitempty"`
// the frequency at which the system issues the monitor check (in second) Interval int64 `json:"interval,omitempty"` // If the monitor port is specified, it would override pool member port setting for healthcheck. A port range is not supported. MonitorPort string `json:"monitor_port,omitempty"` // num of consecutive checks must pass before marking it up RiseCount int64 `json:"rise_count,omitempty"` // the number of seconds the target has in which to respond to the monitor request Timeout int64 `json:"timeout,omitempty"` // authentication depth is used to set the verification depth in the server certificates chain. CertificateChainDepth int64 `json:"certificate_chain_depth,omitempty"` // supported SSL cipher list to servers Ciphers []string `json:"ciphers,omitempty"` // client certificate can be specified to support client authentication. ClientCertificateId string `json:"client_certificate_id,omitempty"` // This flag is set to true when all the ciphers and protocols are secure. It is set to false when one of the ciphers or protocols is insecure. IsSecure bool `json:"is_secure,omitempty"` // SSL versions TLS1.1 and TLS1.2 are supported and enabled by default. SSLv2, SSLv3, and TLS1.0 are supported, but disabled by default. Protocols []string `json:"protocols,omitempty"` // String to send as part of HTTP health check request body. Valid only for certain HTTP methods like POST. RequestBody string `json:"request_body,omitempty"` // Array of HTTP request headers RequestHeaders []LbHttpRequestHeader `json:"request_headers,omitempty"` // the health check method for HTTP monitor type RequestMethod string `json:"request_method,omitempty"` // URL used for HTTP monitor RequestUrl string `json:"request_url,omitempty"` // HTTP request version RequestVersion string `json:"request_version,omitempty"` // If HTTP response body match string (regular expressions not supported) is specified (using LbHttpMonitor.response_body) then the healthcheck HTTP response body is matched against the specified string and server is considered healthy only if there is a match. If the response body string is not specified, HTTP healthcheck is considered successful if the HTTP response status code is 2xx, but it can be configured to accept other status codes as successful. ResponseBody string `json:"response_body,omitempty"` // The HTTP response status code should be a valid HTTP status code. ResponseStatusCodes []int32 `json:"response_status_codes,omitempty"` // server authentication mode ServerAuth string `json:"server_auth,omitempty"` // If server auth type is REQUIRED, server certificate must be signed by one of the trusted Certificate Authorities (CAs), also referred to as root CAs, whose self signed certificates are specified. ServerAuthCaIds []string `json:"server_auth_ca_ids,omitempty"` // A Certificate Revocation List (CRL) can be specified in the server-side SSL profile binding to disallow compromised server certificates. ServerAuthCrlIds []string `json:"server_auth_crl_ids,omitempty"` }
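// Hedged usage sketch (added for illustration; not part of the generated
// bindings): a minimal HTTPS health monitor definition. The concrete values
// are illustrative assumptions, not defaults mandated by the NSX API; most of
// the remaining fields are omitempty and can be left unset.
func exampleHttpsMonitor() LbHttpsMonitor {
	return LbHttpsMonitor{
		ResourceType:        "LbHttpsMonitor",
		DisplayName:         "https-health-check",
		MonitorPort:         "443",
		Interval:            5,
		Timeout:             15,
		RiseCount:           3,
		FallCount:           3,
		RequestMethod:       "GET",
		RequestUrl:          "/healthz",
		ResponseStatusCodes: []int32{200},
	}
}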
gspath.go
package searcher import ( "regexp" ) // GSPath contains full path to an object in Google Cloud Storage type GSPath struct { Bucket string Object string } type errorConst string // PathError is returned when a path is not a valid Google Cloud Storage path const PathError errorConst = "Not a valid gs path." func (e errorConst) Error() string { return string(e) } var re = regexp.MustCompile(`^gs://([^/]+)/(.+)$`) // ParseGCS parses a Google Cloud Storage path of the form // gs://<bucket>/<object> and returns a GSPath with the parsed info. // If the path doesn't match, it returns a PathError. func ParseGCS(path string) (*GSPath, error)
{ m := re.FindStringSubmatch(path) if m == nil { return nil, PathError } return &GSPath{m[1], m[2]}, nil }
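// Hedged usage sketch (added for illustration; not part of the original file):
// splitting a gs:// URI into its bucket and object parts. The example path is
// an arbitrary assumption.
func exampleParseGCS() (bucket, object string, err error) {
	p, err := ParseGCS("gs://my-bucket/path/to/object.txt")
	if err != nil {
		// Anything not of the form gs://<bucket>/<object> returns PathError.
		return "", "", err
	}
	return p.Bucket, p.Object, nil // "my-bucket", "path/to/object.txt"
}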
get.rs
use anyhow::{bail, Result}; use std::sync::Arc; use tangram_app_context::Context; use tangram_app_core::{ error::{bad_request, not_found, redirect_to_login, service_unavailable}, model::get_model_bytes, path_components, user::{authorize_user, authorize_user_for_model}, }; use tangram_id::Id; pub async fn get(request: &mut http::Request<hyper::Body>) -> Result<http::Response<hyper::Body>> { download_inner(request).await } pub async fn download_inner( request: &mut http::Request<hyper::Body>, ) -> Result<http::Response<hyper::Body>>
{ let context = Arc::clone(request.extensions().get::<Arc<Context>>().unwrap()); let app_state = &context.app.state; let model_id = if let ["repos", _, "models", model_id, "download"] = path_components(request).as_slice() { model_id.to_owned() } else { bail!("unexpected path"); }; let mut db = match app_state.database_pool.begin().await { Ok(db) => db, Err(_) => return Ok(service_unavailable()), }; let user = match authorize_user(request, &mut db, app_state.options.auth_enabled()).await? { Ok(user) => user, Err(_) => return Ok(redirect_to_login()), }; let model_id: Id = match model_id.parse() { Ok(model_id) => model_id, Err(_) => return Ok(bad_request()), }; if !authorize_user_for_model(&mut db, &user, model_id).await? { return Ok(not_found()); } let bytes = get_model_bytes(&app_state.storage, model_id).await?; let bytes = bytes.to_owned(); db.commit().await?; let response = http::Response::builder() .status(http::StatusCode::OK) .body(hyper::Body::from(bytes)) .unwrap(); Ok(response) }
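// Hedged note (added; not part of the original handler): the path_components
// match above means this endpoint serves requests shaped like
//
//     GET /repos/<repo_id>/models/<model_id>/download
//
// where <model_id> must parse as a tangram_id::Id (otherwise bad_request()),
// and any other path shape hits the bail!("unexpected path") branch.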
hook-numba.py
# ------------------------------------------------------------------ # Copyright (c) 2020 PyInstaller Development Team. # # This file is distributed under the terms of the GNU General Public # License (version 2.0 or later). # # The full license is available in LICENSE.GPL.txt, distributed with # this software. # # SPDX-License-Identifier: GPL-2.0-or-later # ------------------------------------------------------------------ # # NumPy aware dynamic Python compiler using LLVM # https://github.com/numba/numba #
excludedimports = ["IPython", "scipy"] hiddenimports = ["llvmlite"]
# Tested with: # numba 0.26 (Anaconda 4.1.1, Windows), numba 0.28 (Linux)
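# Hedged usage note (added; not part of the original hook): PyInstaller applies
# hook-numba.py automatically whenever the frozen application imports numba.
# To try a locally modified copy of this hook, point PyInstaller at its
# directory; the paths and script name below are placeholders.
#
#   pyinstaller --additional-hooks-dir=/path/to/hooks my_numba_app.py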
execution_environment.rs
//! The execution environment public interface. mod errors; use crate::{messages::CanisterInputMessage, state_manager::StateManagerError}; pub use errors::{CanisterHeartbeatError, MessageAcceptanceError}; pub use errors::{HypervisorError, TrapCode}; use ic_base_types::{NumBytes, SubnetId}; use ic_registry_provisional_whitelist::ProvisionalWhitelist; use ic_registry_routing_table::RoutingTable; use ic_registry_subnet_type::SubnetType; use ic_types::{ ingress::{IngressStatus, WasmResult}, messages::{MessageId, SignedIngressContent, UserQuery}, user_error::UserError, Height, NumInstructions, Time, }; use rand::RngCore; use serde::{Deserialize, Serialize}; use std::{ collections::BTreeMap, sync::{Arc, RwLock}, }; /// Instance execution statistics. The stats are cumulative and /// contain measurements from the point in time when the instance was /// created up until the moment they are requested. #[derive(Serialize, Deserialize, Clone)] pub struct InstanceStats { /// Total number of (host) pages accessed (read or written) by the instance /// and loaded into the linear memory. pub accessed_pages: usize, /// Total number of (host) pages modified by the instance. /// By definition a page that has been dirtied has also been accessed, /// hence this dirtied_pages <= accessed_pages pub dirty_pages: usize, } /// Errors that can be returned when fetching the available memory on a subnet. pub enum SubnetAvailableMemoryError { InsufficientMemory { requested: NumBytes, available: NumBytes, }, } /// This struct is used to manage the view of the current amount of memory /// available on the subnet between multiple canisters executing in parallel. /// /// The problem is that when canisters with no memory reservations want to /// expand their memory consumption, we need to ensure that they do not go over /// subnet's capacity. As we execute canisters in parallel, we need to /// provide them with a way to view the latest state of memory availble in a /// thread safe way. Hence, we use `Arc<RwLock<>>` here. #[derive(Serialize, Deserialize, Clone)] pub struct SubnetAvailableMemory(Arc<RwLock<NumBytes>>); impl SubnetAvailableMemory { pub fn new(amount: NumBytes) -> Self { Self(Arc::new(RwLock::new(amount))) } /// Try to use some memory capacity and fail if not enough is available pub fn try_decrement(&self, requested: NumBytes) -> Result<(), SubnetAvailableMemoryError> { let mut available = self.0.write().unwrap(); if requested <= *available { *available -= requested; Ok(()) } else { Err(SubnetAvailableMemoryError::InsufficientMemory { requested, available: *available, }) } } } /// ExecutionEnvironment is the component responsible for executing messages /// on the IC. pub trait ExecutionEnvironment: Sync + Send { /// Type modelling the replicated state. /// /// Should typically be /// `ic_replicated_state::ReplicatedState`. // Note [Associated Types in Interfaces] type State; /// Type modelling the canister state. /// /// Should typically be /// `ic_replicated_state::CanisterState`. // Note [Associated Types in Interfaces] type CanisterState; /// Executes a message sent to a subnet. // // A deterministic cryptographically secure pseudo-random number generator // is created per round and per thread and passed to this method to be used // while responding to randomness requests (i.e. raw_rand). Using the type // "&mut RngCore" imposes a problem with our usage of "mockall" library in // the test_utilities. 
Mockall's doc states: "The only restrictions on // mocking generic methods are that all generic parameters must be 'static, // and generic lifetime parameters are not allowed." Hence, the type of the // parameter is "&mut (dyn RngCore + 'static)". #[allow(clippy::too_many_arguments)] fn execute_subnet_message( &self, msg: CanisterInputMessage, state: Self::State, instructions_limit: NumInstructions, rng: &mut (dyn RngCore + 'static), provisional_whitelist: &ProvisionalWhitelist, subnet_available_memory: SubnetAvailableMemory, ) -> Self::State; /// Executes a message sent to a canister. #[allow(clippy::too_many_arguments)] fn execute_canister_message( &self, canister_state: Self::CanisterState, instructions_limit: NumInstructions, msg: CanisterInputMessage, time: Time, routing_table: Arc<RoutingTable>, subnet_records: Arc<BTreeMap<SubnetId, SubnetType>>, subnet_available_memory: SubnetAvailableMemory, ) -> ExecResult<ExecuteMessageResult<Self::CanisterState>>; /// Asks the canister if it is willing to accept the provided ingress /// message. fn should_accept_ingress_message( &self, state: Arc<Self::State>, provisional_whitelist: &ProvisionalWhitelist, ingress: &SignedIngressContent, ) -> Result<(), MessageAcceptanceError>; /// Executes a heartbeat of a given canister. fn execute_canister_heartbeat( &self, canister_state: Self::CanisterState, instructions_limit: NumInstructions, routing_table: Arc<RoutingTable>, subnet_records: Arc<BTreeMap<SubnetId, SubnetType>>, time: Time, subnet_available_memory: SubnetAvailableMemory, ) -> ExecResult<( Self::CanisterState, NumInstructions, Result<NumBytes, CanisterHeartbeatError>, )>; /// Look up the current amount of memory available on the subnet. /// EXC-185 will make this method obsolete. fn subnet_available_memory(&self, state: &Self::State) -> NumBytes; } /// The data structure returned by /// `ExecutionEnvironment.execute_canister_message()`. pub struct ExecuteMessageResult<CanisterState> { /// The `CanisterState` after message execution pub canister: CanisterState, /// The amount of instructions left after message execution. This must be <= /// to the instructions_limit that `execute_canister_message()` was called /// with. pub num_instructions_left: NumInstructions, /// Optional status for an Ingress message if available. pub ingress_status: Option<(MessageId, IngressStatus)>, /// The size of the heap delta the canister produced pub heap_delta: NumBytes, } /// An underlying struct/helper for implementing select() on multiple /// AsyncResult<T>'s. If an AsyncResult is really an ongoing computation, we /// have to obtain its result from a channel. However, some AsyncResults are of /// type EarlyResult, which only emulates being async, but in reality is a ready /// value (mostly used for early errors). In such case, there is no channel /// present and we can simply return the value without waiting. pub enum TrySelect<T> { EarlyResult(T), // These Box<Any>'s are here only to hide internal data types from the interfaces crate. // These are known types (crossbeam channnel, WasmExecutionOutput), // and if we restructure our dependency tree we may put the real types here. Channel( Box<dyn std::any::Any + 'static>, Box<dyn FnOnce(Box<dyn std::any::Any + 'static>) -> T>, ), } /// An execution can finish successfully or get interrupted (out of cycles). pub enum ExecResultVariant<T> { Completed(T), Interrupted(Box<dyn InterruptedExec<T>>), } // Most likely these traits can be moved to embedders crate if we restructure // ExecutionEnvironment a little. 
/// An async result which allows for sync wait and select. pub trait AsyncResult<T> { fn get(self: Box<Self>) -> ExecResultVariant<T>; fn try_select(self: Box<Self>) -> TrySelect<T>; } /// Interrupted execution. Can be resumed or canceled. pub trait InterruptedExec<T> { fn resume(self: Box<Self>, cycles_topup: NumInstructions) -> ExecResult<T>; fn cancel(self: Box<Self>) -> ExecResult<T>; } impl<A: 'static> dyn InterruptedExec<A> { /// Add post-processing on the output received after resume/cancel. pub fn and_then<B: 'static, F: 'static + FnOnce(A) -> B>( self: Box<Self>, f: F, ) -> Box<dyn InterruptedExec<B>> { Box::new(ResumeTokenWrapper { resume_token: self, f, }) } } // A wrapper which allows for post processing of the ExecResult returned by // original resume/cancel. struct ResumeTokenWrapper<A, B, F: FnOnce(A) -> B> { resume_token: Box<dyn InterruptedExec<A>>, f: F, } impl<A, B, F> InterruptedExec<B> for ResumeTokenWrapper<A, B, F> where A: 'static, B: 'static, F: 'static + FnOnce(A) -> B, { fn resume(self: Box<Self>, cycles_topup: NumInstructions) -> ExecResult<B> { self.resume_token.resume(cycles_topup).and_then(self.f) } fn cancel(self: Box<Self>) -> ExecResult<B> { self.resume_token.cancel().and_then(self.f) } } /// Generic async result of an execution. pub struct ExecResult<T> { result: Box<dyn AsyncResult<T>>, } impl<T> ExecResult<T> { pub fn
(result: Box<dyn AsyncResult<T>>) -> Self { Self { result } } /// Wait for the result pub fn get(self) -> ExecResultVariant<T> { self.result.get() } /// Wait for the final result without allowing for a pause. /// If pause occurs, the execution is automatically cancelled. pub fn get_no_pause(self) -> T { match self.result.get() { ExecResultVariant::Completed(x) => x, ExecResultVariant::Interrupted(resume_token) => { if let ExecResultVariant::Completed(x) = resume_token.cancel().get() { x } else { panic!("Unexpected response from execution cancel request"); } } } } /// This function allows to extract an underlying channel to perform a /// select. It is used to implement 'ic_embedders::ExecSelect' and is /// not meant to be used explicitly. pub fn try_select(self) -> TrySelect<T> { self.result.try_select() } } impl<A: 'static> ExecResult<A> { /// Add post-processing on the result. pub fn and_then<B: 'static, F: 'static + FnOnce(A) -> B>(self, f: F) -> ExecResult<B> { ExecResult::new(Box::new(ExecResultWrapper { result: self, f })) } } // A wrapper which allows for post processing of the original ExecResult. struct ExecResultWrapper<A, B, F: FnOnce(A) -> B> { result: ExecResult<A>, f: F, } impl<A, B, F> AsyncResult<B> for ExecResultWrapper<A, B, F> where A: 'static, B: 'static, F: 'static + FnOnce(A) -> B, { fn get(self: Box<Self>) -> ExecResultVariant<B> { match self.result.get() { ExecResultVariant::Completed(x) => ExecResultVariant::Completed((self.f)(x)), ExecResultVariant::Interrupted(resume_token) => { ExecResultVariant::Interrupted(resume_token.and_then(self.f)) } } } fn try_select(self: Box<Self>) -> TrySelect<B> { let f = self.f; match self.result.try_select() { TrySelect::EarlyResult(res) => TrySelect::EarlyResult(f(res)), TrySelect::Channel(a, p) => TrySelect::Channel(a, Box::new(move |x| f(p(x)))), } } } /// Sync result implementing async interface. pub struct EarlyResult<T> { result: T, } impl<T: 'static> EarlyResult<T> { #[allow(clippy::new_ret_no_self)] pub fn new(result: T) -> ExecResult<T> { ExecResult { result: Box::new(Self { result }), } } } impl<T: 'static> AsyncResult<T> for EarlyResult<T> { fn get(self: Box<Self>) -> ExecResultVariant<T> { ExecResultVariant::Completed(self.result) } fn try_select(self: Box<Self>) -> TrySelect<T> { TrySelect::EarlyResult(self.result) } } pub type HypervisorResult<T> = Result<T, HypervisorError>; /// Interface for the component to execute queries on canisters. It can be used /// by the HttpHandler and other system components to execute queries. pub trait QueryHandler: Send + Sync { /// Type of state managed by StateReader. /// /// Should typically be `ic_replicated_state::ReplicatedState`. // Note [Associated Types in Interfaces] type State; /// Handle a query of type `UserQuery` which was sent by an end user. fn query( &self, q: UserQuery, processing_state: Arc<Self::State>, data_certificate: Vec<u8>, ) -> Result<WasmResult, UserError>; } /// Errors that can be returned when reading/writing from/to ingress history. #[derive(Clone, Debug, PartialEq, Eq)] pub enum IngressHistoryError { StateRemoved(Height), StateNotAvailableYet(Height), } impl From<StateManagerError> for IngressHistoryError { fn from(source: StateManagerError) -> Self { match source { StateManagerError::StateRemoved(height) => Self::StateRemoved(height), StateManagerError::StateNotCommittedYet(height) => Self::StateNotAvailableYet(height), } } } /// Interface for reading the history of ingress messages. 
pub trait IngressHistoryReader: Send + Sync { /// Returns a function that can be used to query the status for a given /// `message_id` using the latest execution state. fn get_latest_status(&self) -> Box<dyn Fn(&MessageId) -> IngressStatus>; /// Return a function that can be used to query the status for a given /// `message_id` using the state at given `height`. /// /// Return an error if the the state is not available. fn get_status_at_height( &self, height: Height, ) -> Result<Box<dyn Fn(&MessageId) -> IngressStatus>, IngressHistoryError>; } /// Interface for updating the history of ingress messages. pub trait IngressHistoryWriter: Send + Sync { /// Type of state this Writer can update. /// /// Should typically be `ic_replicated_state::ReplicatedState`. // Note [Associated Types in Interfaces] type State; /// Allows to set status on a message. /// /// The allowed status transitions are: /// * "None" -> {"Received", "Processing", "Completed", "Failed"} /// * "Received" -> {"Processing", "Completed", "Failed"} /// * "Processing" -> {"Processing", "Completed", "Failed"} fn set_status(&self, state: &mut Self::State, message_id: MessageId, status: IngressStatus); } /// A trait for providing all necessary imports to a Wasm module. pub trait SystemApi { /// Stores the execution error, so that the user can evaluate it later. fn set_execution_error(&mut self, error: HypervisorError); /// Returns the reference to the execution error. fn get_execution_error(&self) -> Option<&HypervisorError>; /// Returns the amount of available instructions. fn get_available_num_instructions(&self) -> NumInstructions; /// Returns the stable memory delta that the canister produced fn get_stable_memory_delta_pages(&self) -> usize; /// Sets the amount of available instructions. fn set_available_num_instructions(&mut self, num_instructions: NumInstructions); /// Copies `size` bytes starting from `offset` inside the opaque caller blob /// and copies them to heap[dst..dst+size]. The caller is the canister /// id in case of requests or the user id in case of an ingress message. fn ic0_msg_caller_copy( &self, dst: u32, offset: u32, size: u32, heap: &mut [u8], ) -> HypervisorResult<()>; /// Returns the size of the opaque caller blob. fn ic0_msg_caller_size(&self) -> HypervisorResult<u32>; /// Returns the size of msg.payload. fn ic0_msg_arg_data_size(&self) -> HypervisorResult<u32>; /// Copies `length` bytes from msg.payload[offset..offset+size] to /// memory[dst..dst+size]. fn ic0_msg_arg_data_copy( &self, dst: u32, offset: u32, size: u32, heap: &mut [u8], ) -> HypervisorResult<()>; /// Used to look up the size of the method_name that the message wants to /// call. Can only be called in the context of inspecting messages. fn ic0_msg_method_name_size(&self) -> HypervisorResult<u32>; /// Used to copy the method_name that the message wants to call to heap. Can /// only be called in the context of inspecting messages. fn ic0_msg_method_name_copy( &self, dst: u32, offset: u32, size: u32, heap: &mut [u8], ) -> HypervisorResult<()>; // If the canister calls this method, then the message will be accepted // otherwise rejected. Can only be called in the context of accepting // messages. fn ic0_accept_message(&mut self) -> HypervisorResult<()>; /// Copies the data referred to by src/size out of the canister and appends /// it to the (initially empty) data reply. 
fn ic0_msg_reply_data_append( &mut self, src: u32, size: u32, heap: &[u8], ) -> HypervisorResult<()>; /// Replies to the sender with the data assembled using /// `msg_reply_data_append`. fn ic0_msg_reply(&mut self) -> HypervisorResult<()>; /// Returns the reject code, if the current function is invoked as a /// reject callback. /// /// It returns the special “no error” code 0 if the callback is not invoked /// as a reject callback fn ic0_msg_reject_code(&self) -> HypervisorResult<i32>; /// Replies to sender with an error message fn ic0_msg_reject(&mut self, src: u32, size: u32, heap: &[u8]) -> HypervisorResult<()>; /// Returns the length of the reject message in bytes. /// /// # Panics /// /// This traps if not invoked from a reject callback. fn ic0_msg_reject_msg_size(&self) -> HypervisorResult<u32>; /// Copies length bytes from self.reject_msg[offset..offset+size] to /// memory[dst..dst+size] /// /// # Panics /// /// This traps if offset+size is greater than the size of the reject /// message, or if dst+size exceeds the size of the Wasm memory, or if not /// called from inside a reject callback. fn ic0_msg_reject_msg_copy( &self, dst: u32, offset: u32, size: u32, heap: &mut [u8], ) -> HypervisorResult<()>; /// Returns the size of the blob corresponding to the id of the canister. fn ic0_canister_self_size(&self) -> HypervisorResult<usize>; /// Copies `size` bytes starting from `offset` in the id blob of the /// canister to heap[dst..dst+size]. fn ic0_canister_self_copy( &mut self, dst: u32, offset: u32, size: u32, heap: &mut [u8], ) -> HypervisorResult<()>; /// Returns the size of the blob corresponding to the id of the controller. fn ic0_controller_size(&self) -> HypervisorResult<usize>; /// Copies `size` bytes starting from `offset` in the id blob of the /// controller to heap[dst..dst+size]. fn ic0_controller_copy( &mut self, dst: u32, offset: u32, size: u32, heap: &mut [u8], ) -> HypervisorResult<()>; /// Outputs the specified bytes on the heap as a string on STDOUT. fn ic0_debug_print(&self, src: u32, size: u32, heap: &[u8]); /// Just like `exec` in C replaces the current process with a new process, /// this system call replaces the current canister with a new canister. fn ic0_exec(&mut self, bytes: Vec<u8>, payload: Vec<u8>) -> HypervisorError; /// Traps, with a possibly helpful message fn ic0_trap(&self, src: u32, size: u32, heap: &[u8]) -> HypervisorError; /// Creates a pending inter-canister message that will be scheduled if the /// current message execution completes successfully. #[allow(clippy::too_many_arguments)] fn ic0_call_simple( &mut self, callee_src: u32, callee_size: u32, method_name_src: u32, method_name_len: u32, reply_fun: u32, reply_env: u32, reject_fun: u32, reject_env: u32, data_src: u32, data_len: u32, heap: &[u8], ) -> HypervisorResult<i32>; /// Begins assembling a call to the canister specified by /// callee_src/callee_size at method name_src/name_size. Two mandatory /// callbacks are recorded which will be invoked on success and error /// respectively. /// /// Subsequent calls to other `call_*` apis set further attributes of this /// call until the call is concluded (with `ic0.call_perform) or discarded /// (by returning without calling `ic0.call_perform` or by starting a new /// call with `ic0.call_new`). 
#[allow(clippy::too_many_arguments)] fn ic0_call_new( &mut self, callee_src: u32, callee_size: u32, name_src: u32, name_len: u32, reply_fun: u32, reply_env: u32, reject_fun: u32, reject_env: u32, heap: &[u8], ) -> HypervisorResult<()>; /// Appends the specified bytes to the argument of the call. Initially, the /// argument is empty. This can be called multiple times between /// `ic0.call_new` and `ic0.call_perform`. fn ic0_call_data_append(&mut self, src: u32, size: u32, heap: &[u8]) -> HypervisorResult<()>; /// Specifies the closure to be called if the reply/reject closures trap. /// Can be called at most once between `ic0.call_new` and /// `ic0.call_perform`. /// /// See https://sdk.dfinity.org/docs/interface-spec/index.html#system-api-call fn ic0_call_on_cleanup(&mut self, fun: u32, env: u32) -> HypervisorResult<()>; /// Adds cycles to a call by moving them from the canister's balance onto /// the call under construction. The cycles are deducted immediately /// from the canister's balance and moved back if the call cannot be /// performed (e.g. if `ic0.call_perform` signals an error or if the /// canister invokes `ic0.call_new` or returns without invoking /// `ic0.call_perform`). /// /// This traps if trying to transfer more cycles than are in the current /// balance of the canister. fn ic0_call_cycles_add(&mut self, amount: u64) -> HypervisorResult<()>; /// This call concludes assembling the call. It queues the call message to /// the given destination, but does not actually act on it until the current /// WebAssembly function returns without trapping. /// /// If the system returns 0, then the system was able to enqueue the call, /// if a non-zero value is returned then the call could not be enqueued. /// /// After `ic0.call_perform` and before the next `ic0.call_new`, all other /// `ic0.call_*` calls trap. fn ic0_call_perform(&mut self) -> HypervisorResult<i32>; /// Returns the current size of the stable memory in WebAssembly pages. fn ic0_stable_size(&self) -> HypervisorResult<u32>; /// Tries to grow the stable memory by additional_pages many pages /// containing zeros. /// If successful, returns the previous size of the memory (in pages). /// Otherwise, returns -1 fn ic0_stable_grow(&mut self, additional_pages: u32) -> HypervisorResult<i32>; /// Copies the data referred to by offset/size out of the stable memory and /// replaces the corresponding bytes starting at dst in the canister memory. /// /// This system call traps if dst+size exceeds the size of the WebAssembly /// memory or offset+size exceeds the size of the stable memory. fn ic0_stable_read( &self, dst: u32, offset: u32, size: u32, heap: &mut [u8], ) -> HypervisorResult<()>; /// Copies the data referred to by src/size out of the canister and replaces /// the corresponding segment starting at offset in the stable memory. /// /// This system call traps if src+size exceeds the size of the WebAssembly /// memory or offset+size exceeds the size of the stable memory. fn ic0_stable_write( &mut self, offset: u32, src: u32, size: u32, heap: &[u8], ) -> HypervisorResult<()>; fn ic0_time(&self) -> HypervisorResult<Time>; /// This system call is not part of the public spec and used by the /// hypervisor, when execution runs out of instructions. Higher levels /// can decide how to proceed, by either providing more instructions /// or aborting the execution (typically with an out-of-instructions /// error). fn out_of_instructions(&self) -> HypervisorResult<NumInstructions>; /// This system call is not part of the public spec. 
It's called after a /// native `memory.grow` has been called to check whether there's enough /// available memory left. fn update_available_memory( &mut self, native_memory_grow_res: i32, additional_pages: u32, ) -> HypervisorResult<i32>; /// Returns the current balance in cycles. fn ic0_canister_cycle_balance(&self) -> HypervisorResult<u64>; /// Cycles sent in the current call and still available. fn ic0_msg_cycles_available(&self) -> HypervisorResult<u64>; /// Cycles that came back with the response, as a refund. fn ic0_msg_cycles_refunded(&self) -> HypervisorResult<u64>; /// This moves cycles from the call to the canister balance. /// It can be called multiple times, each time adding more cycles to the /// balance. /// /// It moves no more cycles than `max_amount`. /// /// It moves no more cycles than available according to /// `ic0.msg_cycles_available`, and /// /// The canister balance afterwards does not exceed /// maximum amount of cycles it can hold (public spec refers to this /// constant as MAX_CANISTER_BALANCE) minus any possible outstanding /// balances. However, canisters on system subnets have no balance /// limit. /// /// EXE-117: the last point is not properly handled yet. In particular, a /// refund can come back to the canister after this call finishes which /// causes the canister's balance to overflow. fn ic0_msg_cycles_accept(&mut self, max_amount: u64) -> HypervisorResult<u64>; /// Sets the certified data for the canister. /// See: https://sdk.dfinity.org/docs/interface-spec/index.html#system-api-certified-data fn ic0_certified_data_set(&mut self, src: u32, size: u32, heap: &[u8]) -> HypervisorResult<()>; /// If run in non-replicated execution (i.e. query), /// returns 1 if the data certificate is present, 0 otherwise. /// If run in replicated execution (i.e. an update call or a certified /// query), returns 0. fn ic0_data_certificate_present(&self) -> HypervisorResult<i32>; /// Returns the size of the data certificate if it is present /// (i.e. data_certificate_present returns 1). /// Traps if data_certificate_present returns 0. fn ic0_data_certificate_size(&self) -> HypervisorResult<i32>; /// Copies the data certificate into the heap if it is present /// (i.e. data_certificate_present returns 1). /// Traps if data_certificate_present returns 0. fn ic0_data_certificate_copy( &self, dst: u32, offset: u32, size: u32, heap: &mut [u8], ) -> HypervisorResult<()>; /// Returns the current status of the canister. `1` indicates /// running, `2` indicates stopping, and `3` indicates stopped. fn ic0_canister_status(&self) -> HypervisorResult<u32>; /// Mints the `amount` cycles /// Adds cycles to the canister's balance. /// /// Adds no more cycles than `amount`. /// /// The canister balance afterwards does not exceed /// maximum amount of cycles it can hold. /// However, canisters on system subnets have no balance limit. /// /// Returns the amount of cycles added to the canister's balance. fn ic0_mint_cycles(&mut self, amount: u64) -> HypervisorResult<u64>; }
new
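// --- Editor's illustrative sketch (hypothetical, not part of the system API trait above) ---
// The doc comments above describe an "append then reply" contract: data is assembled
// with `ic0_msg_reply_data_append` and sent with `ic0_msg_reply`. This is a minimal
// standalone model of that contract with the implied bounds checks on the heap slice;
// `ToySystemApi` and `ToyError` are invented names for illustration only.
#[derive(Default)]
struct ToySystemApi {
    reply_buf: Vec<u8>,
    replied: bool,
}

#[derive(Debug, PartialEq)]
enum ToyError {
    HeapOutOfBounds,
    AlreadyReplied,
}

impl ToySystemApi {
    fn msg_reply_data_append(&mut self, src: u32, size: u32, heap: &[u8]) -> Result<(), ToyError> {
        // Trap (here: return an error) if src + size runs past the end of the heap.
        let end = (src as usize)
            .checked_add(size as usize)
            .filter(|&end| end <= heap.len())
            .ok_or(ToyError::HeapOutOfBounds)?;
        self.reply_buf.extend_from_slice(&heap[src as usize..end]);
        Ok(())
    }

    fn msg_reply(&mut self) -> Result<Vec<u8>, ToyError> {
        // Replying twice is an error; the assembled bytes are handed out exactly once.
        if self.replied {
            return Err(ToyError::AlreadyReplied);
        }
        self.replied = true;
        Ok(std::mem::take(&mut self.reply_buf))
    }
}

#[test]
fn toy_reply_assembly() {
    let heap = b"hello world".to_vec();
    let mut api = ToySystemApi::default();
    api.msg_reply_data_append(0, 5, &heap).unwrap();
    api.msg_reply_data_append(6, 5, &heap).unwrap();
    assert_eq!(api.msg_reply().unwrap(), b"helloworld".to_vec());
    assert_eq!(api.msg_reply_data_append(8, 10, &heap), Err(ToyError::HeapOutOfBounds));
}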
fixtureListUpdate.js
// @flow import { wait } from 'react-testing-library'; import { loadPlugins } from 'react-plugin'; import { cleanup, getPluginState, mockState, mockCall } from '../../../../testHelpers/plugin'; import { createFixtureListUpdateResponse } from '../../testHelpers'; import { register } from '../..'; import type { RendererId } from 'react-cosmos-shared2/renderer'; import type { RendererCoordinatorState } from '../..'; afterEach(cleanup); const fixtures = ['ein.js', 'zwei.js', 'drei.js']; const state: RendererCoordinatorState = { connectedRendererIds: ['mockRendererId1', 'mockRendererId2'], primaryRendererId: 'mockRendererId1', fixtures: ['ein.js', 'zwei.js', 'drei.js'], fixtureState: null
mockState('router', { urlParams: {} }); } function loadTestPlugins() { loadPlugins({ state: { rendererCoordinator: state } }); } function mockFixtureListUpdateResponse(rendererId: RendererId) { mockCall( 'rendererCoordinator.receiveResponse', createFixtureListUpdateResponse(rendererId, [...fixtures, 'vier.js']) ); } it('updates fixtures in renderer state', async () => { registerTestPlugins(); loadTestPlugins(); mockFixtureListUpdateResponse('mockRendererId1'); await wait(() => expect(getPluginState('rendererCoordinator').fixtures).toEqual([ ...fixtures, 'vier.js' ]) ); }); it('ignores update from secondary renderer', async () => { registerTestPlugins(); loadTestPlugins(); mockFixtureListUpdateResponse('mockRendererId2'); await wait(() => expect(getPluginState('rendererCoordinator').fixtures).toEqual(fixtures) ); });
}; function registerTestPlugins() { register();
detection_opencv.py
# Detection thread # Models are from http://alereimondo.no-ip.org/OpenCV/34/ # Check it out, there are plenty of them! # Useful and fast ''' If you wanna train your own models check this out! https://docs.opencv.org/3.4/dc/d88/tutorial_traincascade.html ''' # Code from https://docs.opencv.org/3.4/d7/d8b/tutorial_py_face_detection.html # imports import utils.logging_data as LOG import cv2 import imutils import os import sys import threading import numpy as np import re import time import datetime #Detection # Class that handle detection in own thread class Detection(threading.Thread): face_cascade = [] facial_features_cascade = [] # Flipp testing camera flipp_test_nr = 1 flipp_test_degree = 90 do_flipp_test = False flipp_test_long_intervall = 12 # Calculate time start_time = None end_time = None # Thread sleep times sleep_time = 0.1 LONG_SLEEP = 2 SHORT_SLEEP = 0.5 # Number of detection fails to start energy save no_face_count = 0 NO_FACE_MAX = 10 Loaded_model = False # Initiate thread # parameters name, and shared_variables reference def __init__(self, name=None, shared_variables = None): threading.Thread.__init__(self) self.name = name self.shared_variables = shared_variables self.sleep_time = self.SHORT_SLEEP self.index = int(name) LOG.info("Create dlib detection" + str(self.index), "SYSTEM-"+self.shared_variables.name) #Run #Detection function def run(self): # Load model LOG.info("Loading OPENCV model" + str(self.index),"SYSTEM-"+self.shared_variables.name) face_cascade = cv2.CascadeClassifier('utils/haarcascade_frontalface_default.xml') facial_features_cascade = cv2.CascadeClassifier('utils/haarcascade_facial_features.xml') LOG.info("Start opencv detections " + str(self.index),"SYSTEM-"+self.shared_variables.name) # Start Loop while self.shared_variables.system_running: self.start_time = datetime.datetime.now() frame = self.shared_variables.frame[self.index] if self.do_flipp_test: frame = imutils.rotate(frame, self.flipp_test_degree*self.flipp_test_nr) # Do detection if frame is not None : gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) landmarksAndFaces = [] face_patches = face_cascade.detectMultiScale(gray, 1.3, 5) # if found faces if len(face_patches) > 0: landmarksAndFaces.append(face_patches[0].tolist()) for (x,y,w,h) in face_patches: roi_gray = gray[y:y+h, x:x+w] # To dont use landmarks, instead use boxes for (ex,ey,ew,eh) in facial_features_cascade.detectMultiScale(roi_gray): landmarksAndFaces.append( [x + ex, y + ey, ew, eh] ) self.no_face_count = 0 self.shared_variables.face_found[self.index] = True # Save boxes self.shared_variables.face_box[self.index] = landmarksAndFaces #self.shared_variables.detection_box[self.index] = face_box self.shared_variables.set_detection_box(landmarksAndFaces, self.index) # Do flipp test on detection if self.shared_variables.flipp_test[self.index] and self.do_flipp_test: # save flipp as success degree = self.shared_variables.flipp_test_degree[self.index] + self.flipp_test_nr*self.flipp_test_degree degree = degree - (degree % 360)*360 self.shared_variables.flipp_test_degree[self.index] = degree # log frame change LOG.log("Flipp test successful add degree :" + str(self.flipp_test_nr*self.flipp_test_degree),self.shared_variables.name) # end flipp test self.do_flipp_test = False self.flipp_test_nr = 1 else: # No face self.shared_variables.face_found[self.index] = False # if max face misses has been done, stop tracking and do less detections if self.no_face_count >= self.NO_FACE_MAX : # do flipp test if self.shared_variables.flipp_test: # doing flipp 
test if self.do_flipp_test: self.flipp_test_nr = self.flipp_test_nr + 1 # flipp test did not find anything if self.flipp_test_nr*self.flipp_test_degree >= 360:
else: self.do_flipp_test = True else: #self.sleep_time = self.LONG_SLEEP #self.shared_variables.tracking_running = False #LOG.log("Initiate energy save",self.shared_variables.name) pass else: self.no_face_count = self.no_face_count + 1 if self.no_face_count >= self.flipp_test_long_intervall and self.shared_variables.flipp_test[self.index]: self.no_face_count = 0 self.end_time = datetime.datetime.now() # Debug detection time if self.shared_variables.debug: LOG.debug('OPENCV Detection time:' + str(self.end_time - self.start_time),self.shared_variables.name) time.sleep(self.sleep_time) # sleep if wanted LOG.info("Ending OPENCV detection " + str(self.index), "SYSTEM-"+self.shared_variables.name )
self.do_flipp_test = False self.flipp_test_nr = 1 self.sleep_time = self.LONG_SLEEP
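# --- Editor's illustrative sketch (standalone, not part of the Detection thread above) ---
# A minimal single-frame version of the cascade pipeline the thread above builds on:
# convert to grayscale, run the face cascade, then run the facial-feature cascade inside
# each face ROI and shift the feature boxes back into full-frame coordinates. The cascade
# XML paths are assumptions; any OpenCV Haar cascade files work the same way.
import cv2


def detect_boxes(frame, face_xml='haarcascade_frontalface_default.xml',
                 features_xml='haarcascade_facial_features.xml'):
    """Return a list of [x, y, w, h] boxes: each face box followed by its feature boxes."""
    face_cascade = cv2.CascadeClassifier(face_xml)
    features_cascade = cv2.CascadeClassifier(features_xml)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    boxes = []
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        boxes.append([x, y, w, h])
        roi = gray[y:y + h, x:x + w]
        # Feature coordinates are relative to the ROI, so translate them back
        # into full-frame coordinates, as the thread above does.
        for (ex, ey, ew, eh) in features_cascade.detectMultiScale(roi):
            boxes.append([x + ex, y + ey, ew, eh])
    return boxes


# Example usage: boxes = detect_boxes(cv2.imread('some_photo.jpg'))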
preview.rs
use { super::*, crate::{ app::{AppContext, LineNumber}, command::ScrollCommand, display::*, errors::ProgramError, hex::HexView, image::ImageView, pattern::InputPattern, skin::PanelSkin, syntactic::SyntacticView, task_sync::Dam, }, crossterm::{cursor, QueueableCommand}, std::{ io, path::Path, }, termimad::{Area, CropWriter, SPACE_FILLING}, }; pub enum Preview { Image(ImageView), Syntactic(SyntacticView), Hex(HexView), ZeroLen(ZeroLenFileView), IoError(io::Error), } impl Preview { /// build a preview, never failing (but the preview can be Preview::IOError). /// If the prefered mode can't be applied, an other mode is chosen. pub fn new( path: &Path, prefered_mode: Option<PreviewMode>, con: &AppContext, ) -> Self { match prefered_mode { Some(PreviewMode::Hex) => Self::hex(path), Some(PreviewMode::Image) => Self::image(path), Some(PreviewMode::Text) => Self::unfiltered_text(path, con), None => { // automatic behavior: image, text, hex ImageView::new(path) .map(Self::Image) .unwrap_or_else(|_| Self::unfiltered_text(path, con)) } } } /// try to build a preview with the designed mode, return an error /// if that wasn't possible pub fn with_mode( path: &Path, mode: PreviewMode, con: &AppContext, ) -> Result<Self, ProgramError> { match mode { PreviewMode::Hex => { Ok(HexView::new(path.to_path_buf()).map(Self::Hex)?) } PreviewMode::Image => { ImageView::new(path).map(Self::Image) } PreviewMode::Text => { Ok( SyntacticView::new(path, InputPattern::none(), &mut Dam::unlimited(), con) .transpose() .expect("syntactic view without pattern shouldn't be none") .map(Self::Syntactic)?, ) } } } /// build an image view, unless the file can't be interpreted /// as an image, in which case a hex view is used pub fn image(path: &Path) -> Self { ImageView::new(path) .ok() .map(Self::Image) .unwrap_or_else(|| Self::hex(path)) } /// build a text preview (maybe with syntaxic coloring) if possible, /// a hex (binary) view if content isnt't UTF8, a ZeroLen file if there's /// no length (it's probably a linux pseudofile) or a IOError when /// there's a IO problem pub fn unfiltered_text( path: &Path, con: &AppContext, ) -> Self { match SyntacticView::new(path, InputPattern::none(), &mut Dam::unlimited(), con) { Ok(Some(sv)) => Self::Syntactic(sv), Err(ProgramError::ZeroLenFile | ProgramError::UnmappableFile) => { debug!("zero len or unmappable file - check if system file"); Self::ZeroLen(ZeroLenFileView::new(path.to_path_buf())) } // not previewable as UTF8 text // we'll try reading it as binary _ => Self::hex(path), } } /// try to build a filtered text view. 
Will return None if /// the dam gets an event before it's built pub fn filtered( &self, path: &Path, pattern: InputPattern, dam: &mut Dam, con: &AppContext, ) -> Option<Self> { match self { Self::Syntactic(_) => { match SyntacticView::new(path, pattern, dam, con) { // normal finished loading Ok(Some(sv)) => Some(Self::Syntactic(sv)), // interrupted search Ok(None) => None, // not previewable as UTF8 text // we'll try reading it as binary Err(_) => Some(Self::hex(path)), } } _ => None, // not filterable } } /// return a hex_view, suitable for binary, or Self::IOError /// if there was an error pub fn hex(path: &Path) -> Self { match HexView::new(path.to_path_buf()) { Ok(reader) => Self::Hex(reader), Err(e) => { // it's unlikely as the file isn't open at this point warn!("error while previewing {:?} : {:?}", path, e); Self::IoError(e) } } } /// return the preview_mode, or None if we're on IOError pub fn get_mode(&self) -> Option<PreviewMode> { match self { Self::Image(_) => Some(PreviewMode::Image), Self::Syntactic(_) => Some(PreviewMode::Text), Self::ZeroLen(_) => Some(PreviewMode::Text), Self::Hex(_) => Some(PreviewMode::Hex), Self::IoError(_) => None, } } pub fn pattern(&self) -> InputPattern { match self { Self::Syntactic(sv) => sv.pattern.clone(), _ => InputPattern::none(), } } pub fn try_scroll( &mut self, cmd: ScrollCommand, ) -> bool { match self { Self::Syntactic(sv) => sv.try_scroll(cmd), Self::Hex(hv) => hv.try_scroll(cmd), _ => false, } } pub fn is_filterable(&self) -> bool { matches!(self, Self::Syntactic(_)) } pub fn get_selected_line(&self) -> Option<String> { match self { Self::Syntactic(sv) => sv.get_selected_line(), _ => None, } } pub fn get_selected_line_number(&self) -> Option<LineNumber> { match self { Self::Syntactic(sv) => sv.get_selected_line_number(), _ => None, } } pub fn try_select_line_number(&mut self, number: usize) -> bool { match self { Self::Syntactic(sv) => sv.try_select_line_number(number), _ => false, } } pub fn unselect(&mut self) { if let Self::Syntactic(sv) = self { sv.unselect(); } } pub fn try_select_y(&mut self, y: u16) -> bool { match self { Self::Syntactic(sv) => sv.try_select_y(y), _ => false, } } pub fn move_selection(&mut self, dy: i32, cycle: bool) { match self { Self::Syntactic(sv) => sv.move_selection(dy, cycle), Self::Hex(hv) => { hv.try_scroll(ScrollCommand::Lines(dy)); } _ => {} } } pub fn select_first(&mut self) { match self { Self::Syntactic(sv) => sv.select_first(), Self::Hex(hv) => hv.select_first(), _ => {} } } pub fn select_last(&mut self) { match self { Self::Syntactic(sv) => sv.select_last(), Self::Hex(hv) => hv.select_last(), _ => {} } } pub fn display( &mut self, w: &mut W, screen: Screen, panel_skin: &PanelSkin, area: &Area, con: &AppContext, ) -> Result<(), ProgramError> { match self { Self::Image(iv) => iv.display(w, screen, panel_skin, area, con), Self::Syntactic(sv) => sv.display(w, screen, panel_skin, area, con), Self::ZeroLen(zlv) => zlv.display(w, screen, panel_skin, area), Self::Hex(hv) => hv.display(w, screen, panel_skin, area), Self::IoError(err) => { let mut y = area.top; w.queue(cursor::MoveTo(area.left, y))?; let mut cw = CropWriter::new(w, area.width as usize); cw.queue_str(&panel_skin.styles.default, "An error prevents the preview:")?; cw.fill(&panel_skin.styles.default, &SPACE_FILLING)?; y += 1;
cw.queue_g_string(&panel_skin.styles.status_error, err.to_string())?; cw.fill(&panel_skin.styles.default, &SPACE_FILLING)?; y += 1; while y < area.top + area.height { w.queue(cursor::MoveTo(area.left, y))?; let mut cw = CropWriter::new(w, area.width as usize); cw.fill(&panel_skin.styles.default, &SPACE_FILLING)?; y += 1; } Ok(()) } } } pub fn display_info( &mut self, w: &mut W, screen: Screen, panel_skin: &PanelSkin, area: &Area, ) -> Result<(), ProgramError> { match self { Self::Image(iv) => iv.display_info(w, screen, panel_skin, area), Self::Syntactic(sv) => sv.display_info(w, screen, panel_skin, area), Self::Hex(hv) => hv.display_info(w, screen, panel_skin, area), _ => Ok(()), } } }
w.queue(cursor::MoveTo(area.left, y))?; let mut cw = CropWriter::new(w, area.width as usize);
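// --- Editor's illustrative sketch (toy types, not broot code) ---
// `Preview::new` and `Preview::unfiltered_text` above implement a graceful fallback
// chain: try the richest view first (image), then text, and fall back to a hex view
// that can always be built. This is a minimal standalone model of that selection
// logic; `ToyPreview`, `try_image` and `try_text` are invented stand-ins for the
// real view constructors.
#[derive(Debug)]
enum ToyPreview {
    Image(String),
    Text(String),
    Hex(Vec<u8>),
}

fn try_image(path: &str) -> Result<ToyPreview, ()> {
    // In this toy model only .png files count as images.
    if path.ends_with(".png") { Ok(ToyPreview::Image(path.to_string())) } else { Err(()) }
}

fn try_text(path: &str) -> Result<ToyPreview, ()> {
    // In this toy model only .rs files count as valid UTF-8 text.
    if path.ends_with(".rs") { Ok(ToyPreview::Text(path.to_string())) } else { Err(()) }
}

/// Never fails: the hex view is the last-resort representation.
fn toy_preview(path: &str) -> ToyPreview {
    try_image(path)
        .or_else(|_| try_text(path))
        .unwrap_or_else(|_| ToyPreview::Hex(path.as_bytes().to_vec()))
}

fn main() {
    println!("{:?}", toy_preview("photo.png")); // Image("photo.png")
    println!("{:?}", toy_preview("main.rs"));   // Text("main.rs")
    println!("{:?}", toy_preview("data.bin"));  // Hex([...]) fallback
}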
index.ts
import dayjs from "dayjs"; import { Arg, Args, Authorized, Ctx, Mutation, Query, Resolver, UseMiddleware } from "type-graphql"; import { Entity, getRepository } from "typeorm"; import { Stripe } from "../../../api"; import { isAuthorized } from "../../../auth"; import { AppContext } from "../../../middlewares/apollo/types"; import { Booking, Listing } from "../../entities"; import { ValidAntiForgeryToken } from "../../middlewares"; import { BookingDataResponse, PaginationArgs } from "../types"; import { CreateBookingArgs } from "./types"; @Entity() @Resolver(Booking) export class
{ @Mutation(() => Booking) @Authorized() @UseMiddleware(ValidAntiForgeryToken) async createBooking( @Ctx() ctx: AppContext, @Arg("input") input: CreateBookingArgs ): Promise<Booking> { try { const { id, source, checkIn, checkOut } = input; const listing = await Listing.findOne(id); if (!listing) { throw new Error("Listing can't be found"); } if (listing.hostId === ctx.req.user!.id) { throw new Error("User can't book own listing"); } const today = dayjs(); const checkInDate = dayjs(checkIn); const checkOutDate = dayjs(checkOut); const maxStayDate = dayjs(checkInDate.add(listing.maxStay, "day")); const minStayDate = checkInDate.add(listing.minStay, "day"); if (checkInDate.isAfter(today.add(90, "day"))) { throw new Error( "check in date can't be more than 90 days from today" ); } if (checkOutDate.isBefore(minStayDate)) { throw new Error( `check out date must be after a minimum stay of ${listing.minStay} days` ); } if (checkOutDate.isAfter(maxStayDate)) { throw new Error( `check out date can't be more than ${listing.maxStay} days from today` ); } const totalPrice = listing.price * (checkOutDate.diff(checkInDate, "day") + 1); const host = await listing.host; if (!host || !host.walletId) { throw new Error( "the host either can't be found or is not connected with Stripe" ); } await Stripe.charge(totalPrice, source, host.walletId); const booking = Booking.create({ checkIn: new Date(checkIn), checkOut: new Date(checkOut), tenantId: ctx.req.user!.id, listingId: listing.id }); await booking.save(); host.income = host.income + totalPrice; await host.save(); return booking; } catch (error) { throw new Error(`Failed to create a booking: ${error}`); } } @Query(() => BookingDataResponse, { nullable: true }) async bookingsForListing( @Ctx() ctx: AppContext, @Arg("listingId") listingId: string, @Args() input: PaginationArgs ): Promise<BookingDataResponse | null> { try { const listing = await Listing.findOne(listingId); if (!listing) { throw new Error("Listing can't be found"); } if (!isAuthorized(ctx.req) || ctx.req.user?.id !== listing.hostId) { return null; } const { limit, page } = input; const repository = getRepository(Booking); const data: BookingDataResponse = { total: 0, result: [] }; const [items, count] = await repository.findAndCount({ skip: page > 0 ? (page - 1) * limit : 0, take: limit, where: { listingId } }); data.total = count; data.result = items; return data; } catch (error) { throw new Error(`Failed to query listing bookings: ${error}`); } } @Query(() => BookingDataResponse, { nullable: true }) async bookingsForUser( @Ctx() ctx: AppContext, @Arg("userId") userId: string, @Args() input: PaginationArgs ): Promise<BookingDataResponse | null> { try { if (!isAuthorized(ctx.req) || ctx.req.user?.id !== userId) { return null; } const { limit, page } = input; const repository = getRepository(Booking); const data: BookingDataResponse = { total: 0, result: [] }; const [items, count] = await repository.findAndCount({ skip: page > 0 ? (page - 1) * limit : 0, take: limit, where: { tenantId: userId } }); data.total = count; data.result = items; return data; } catch (error) { throw new Error(`Failed to query bookings for user: ${error}`); } } }
BookingResolver
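// --- Editor's illustrative sketch (standalone, not part of the resolver above) ---
// The date checks in createBooking boil down to: check-in at most 90 days out, a stay
// between minStay and maxStay days, and a total price derived from the day difference
// plus one (matching the resolver above). Listing fields are passed in directly here
// instead of being loaded from the database; names below are illustrative only.
import dayjs from "dayjs";

interface StayRules {
  price: number;   // per day
  minStay: number; // days
  maxStay: number; // days
}

export function validateAndPrice(
  checkIn: string,
  checkOut: string,
  rules: StayRules
): number {
  const today = dayjs();
  const checkInDate = dayjs(checkIn);
  const checkOutDate = dayjs(checkOut);

  if (checkInDate.isAfter(today.add(90, "day"))) {
    throw new Error("check in date can't be more than 90 days from today");
  }
  if (checkOutDate.isBefore(checkInDate.add(rules.minStay, "day"))) {
    throw new Error(`check out date must be after a minimum stay of ${rules.minStay} days`);
  }
  if (checkOutDate.isAfter(checkInDate.add(rules.maxStay, "day"))) {
    throw new Error(`check out date can't be more than ${rules.maxStay} days after check in`);
  }

  // Same pricing rule as the resolver: (days between the dates) + 1.
  return rules.price * (checkOutDate.diff(checkInDate, "day") + 1);
}

// e.g. validateAndPrice("2021-06-01", "2021-06-05", { price: 100, minStay: 2, maxStay: 30 }) === 500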
query_device.go
package iot //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" ) // QueryDevice invokes the iot.QueryDevice API synchronously // api document: https://help.aliyun.com/api/iot/querydevice.html func (client *Client) QueryDevice(request *QueryDeviceRequest) (response *QueryDeviceResponse, err error) { response = CreateQueryDeviceResponse() err = client.DoAction(request, response) return } // QueryDeviceWithChan invokes the iot.QueryDevice API asynchronously // api document: https://help.aliyun.com/api/iot/querydevice.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) QueryDeviceWithChan(request *QueryDeviceRequest) (<-chan *QueryDeviceResponse, <-chan error) { responseChan := make(chan *QueryDeviceResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.QueryDevice(request) if err != nil
else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // QueryDeviceWithCallback invokes the iot.QueryDevice API asynchronously // api document: https://help.aliyun.com/api/iot/querydevice.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) QueryDeviceWithCallback(request *QueryDeviceRequest, callback func(response *QueryDeviceResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *QueryDeviceResponse var err error defer close(result) response, err = client.QueryDevice(request) callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result } // QueryDeviceRequest is the request struct for api QueryDevice type QueryDeviceRequest struct { *requests.RpcRequest IotInstanceId string `position:"Query" name:"IotInstanceId"` PageSize requests.Integer `position:"Query" name:"PageSize"` CurrentPage requests.Integer `position:"Query" name:"CurrentPage"` ProductKey string `position:"Query" name:"ProductKey"` ApiProduct string `position:"Body" name:"ApiProduct"` ApiRevision string `position:"Body" name:"ApiRevision"` } // QueryDeviceResponse is the response struct for api QueryDevice type QueryDeviceResponse struct { *responses.BaseResponse RequestId string `json:"RequestId" xml:"RequestId"` Success bool `json:"Success" xml:"Success"` Code string `json:"Code" xml:"Code"` ErrorMessage string `json:"ErrorMessage" xml:"ErrorMessage"` Total int `json:"Total" xml:"Total"` PageSize int `json:"PageSize" xml:"PageSize"` PageCount int `json:"PageCount" xml:"PageCount"` Page int `json:"Page" xml:"Page"` Data DataInQueryDevice `json:"Data" xml:"Data"` } // CreateQueryDeviceRequest creates a request to invoke QueryDevice API func CreateQueryDeviceRequest() (request *QueryDeviceRequest) { request = &QueryDeviceRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("Iot", "2018-01-20", "QueryDevice", "iot", "openAPI") request.Method = requests.POST return } // CreateQueryDeviceResponse creates a response to parse from QueryDevice response func CreateQueryDeviceResponse() (response *QueryDeviceResponse) { response = &QueryDeviceResponse{ BaseResponse: &responses.BaseResponse{}, } return }
{ errChan <- err }
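// --- Editor's illustrative sketch (standalone, not generated SDK code) ---
// QueryDeviceWithChan above follows a common wrapper pattern: run the synchronous call
// in a goroutine and hand back buffered response/error channels. A minimal model of the
// same pattern, with a toy doQuery function standing in for client.QueryDevice.
package main

import (
	"errors"
	"fmt"
)

func doQuery(id string) (string, error) {
	if id == "" {
		return "", errors.New("empty id")
	}
	return "device:" + id, nil
}

func doQueryWithChan(id string) (<-chan string, <-chan error) {
	respChan := make(chan string, 1)
	errChan := make(chan error, 1)
	go func() {
		defer close(respChan)
		defer close(errChan)
		resp, err := doQuery(id)
		if err != nil {
			errChan <- err
		} else {
			respChan <- resp
		}
	}()
	return respChan, errChan
}

func main() {
	respChan, errChan := doQueryWithChan("42")
	// Reading the error channel first works because the goroutine either sends an
	// error or closes the channel, which yields a nil error on the success path.
	if err := <-errChan; err != nil {
		fmt.Println("err:", err)
		return
	}
	fmt.Println("ok:", <-respChan)
}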
leading_character.rs
use super::Fix; use crate::common::*; pub(crate) struct LeadingCharacterFixer<'a> { name: &'a str, }
Self { name: "LeadingCharacter", } } } impl Fix for LeadingCharacterFixer<'_> { fn name(&self) -> &str { self.name } fn fix_line(&mut self, line: &mut LineEntry) -> Option<()> { let key = line.get_key()?; let cleaned_key = remove_invalid_leading_chars(&key); line.raw_string = format!("{}={}", cleaned_key, line.get_value()?); Some(()) } } #[cfg(test)] mod tests { use super::*; use crate::common::tests::*; #[test] fn fix_leading_dot() { let mut fixer = LeadingCharacterFixer::default(); let mut leading_period = line_entry(1, 1, ".FOO=BAR"); assert_eq!(Some(()), fixer.fix_line(&mut leading_period)); assert_eq!("FOO=BAR", leading_period.raw_string); } #[test] fn fix_leading_space() { let mut fixer = LeadingCharacterFixer::default(); let mut leading_space = line_entry(1, 1, " FOO=BAR"); assert_eq!(Some(()), fixer.fix_line(&mut leading_space)); assert_eq!("FOO=BAR", leading_space.raw_string); } #[test] fn fix_leading_asterisk() { let mut fixer = LeadingCharacterFixer::default(); let mut leading_asterisk = line_entry(1, 1, "*FOO=BAR"); assert_eq!(Some(()), fixer.fix_line(&mut leading_asterisk)); assert_eq!("FOO=BAR", leading_asterisk.raw_string); } #[test] fn fix_leading_number() { let mut fixer = LeadingCharacterFixer::default(); let mut leading_number = line_entry(1, 1, "1FOO=BAR"); assert_eq!(Some(()), fixer.fix_line(&mut leading_number)); assert_eq!("FOO=BAR", leading_number.raw_string); } #[test] fn fix_many_invalid_leading_chars() { let mut fixer = LeadingCharacterFixer::default(); let mut leading_number = line_entry(1, 1, "-1&*FOO=BAR"); assert_eq!(Some(()), fixer.fix_line(&mut leading_number)); assert_eq!("FOO=BAR", leading_number.raw_string); } #[test] fn leading_underscore_is_unchanged() { let mut fixer = LeadingCharacterFixer::default(); let mut leading_underscore = line_entry(1, 1, "_FOO=BAR"); assert_eq!(Some(()), fixer.fix_line(&mut leading_underscore)); assert_eq!("_FOO=BAR", leading_underscore.raw_string); } #[test] fn no_leading_char_is_unchanged() { let mut fixer = LeadingCharacterFixer::default(); let mut normal = line_entry(1, 1, "FOO=BAR"); assert_eq!(Some(()), fixer.fix_line(&mut normal)); assert_eq!("FOO=BAR", normal.raw_string); } #[test] fn fix_warnings_test() { let mut fixer = LeadingCharacterFixer::default(); let mut lines = vec![ line_entry(1, 7, ".FOO=BAR"), line_entry(2, 7, " Z=Y"), line_entry(3, 7, "*BAR=BAZ"), line_entry(4, 7, "1QUX=QUUX"), line_entry(5, 7, "_QUUX=FOOBAR"), line_entry(6, 7, "KEY=VALUE"), blank_line_entry(6, 7), ]; let mut warnings = vec![ Warning::new( lines[0].clone(), "LeadingCharacter", String::from("Invalid leading character detected"), ), Warning::new( lines[1].clone(), "LeadingCharacter", String::from("Invalid leading character detected"), ), Warning::new( lines[2].clone(), "LeadingCharacter", String::from("Invalid leading character detected"), ), Warning::new( lines[3].clone(), "LeadingCharacter", String::from("Invalid leading character detected"), ), ]; assert_eq!( Some(4), fixer.fix_warnings(warnings.iter_mut().collect(), &mut lines) ); assert_eq!("FOO=BAR", lines[0].raw_string); assert_eq!("Z=Y", lines[1].raw_string); assert_eq!("BAR=BAZ", lines[2].raw_string); assert_eq!("QUX=QUUX", lines[3].raw_string); assert_eq!("_QUUX=FOOBAR", lines[4].raw_string); assert_eq!("KEY=VALUE", lines[5].raw_string); assert_eq!("\n", lines[6].raw_string); } }
impl Default for LeadingCharacterFixer<'_> { fn default() -> Self {
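// --- Editor's illustrative sketch (hypothetical, not the crate's actual helper) ---
// `remove_invalid_leading_chars` is called by `fix_line` above but not defined in this
// file. Judging from the tests ("-1&*FOO" -> "FOO", "_FOO" unchanged), a plausible
// implementation strips characters until the first letter or underscore:
fn remove_invalid_leading_chars_sketch(key: &str) -> String {
    key.chars()
        .skip_while(|c| !(c.is_ascii_alphabetic() || *c == '_'))
        .collect()
}

#[test]
fn sketch_matches_test_expectations() {
    assert_eq!(remove_invalid_leading_chars_sketch("-1&*FOO"), "FOO");
    assert_eq!(remove_invalid_leading_chars_sketch("_FOO"), "_FOO");
    assert_eq!(remove_invalid_leading_chars_sketch("FOO"), "FOO");
}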
html_form.go
package form import ( "database/sql/driver" "encoding/json" "net/http" "sort" "sync" "github.com/ory/x/sqlxx" "github.com/ory/jsonschema/v3" "github.com/ory/x/errorsx" "github.com/ory/x/decoderx" "github.com/ory/x/jsonschemax" "github.com/ory/x/jsonx" "github.com/ory/x/stringslice" ) var ( decoder = decoderx.NewHTTP() _ ErrorParser = new(HTMLForm) _ ValueSetter = new(HTMLForm) _ Resetter = new(HTMLForm) _ CSRFSetter = new(HTMLForm) ) // HTMLForm represents a HTML Form. The container can work with both HTTP Form and JSON requests // // swagger:model form type HTMLForm struct { sync.RWMutex // Action should be used as the form action URL `<form action="{{ .Action }}" method="post">`. // // required: true Action string `json:"action"` // Method is the form method (e.g. POST) // // required: true Method string `json:"method"` // Fields contains the form fields. // // required: true Fields Fields `json:"fields"` // Errors contains all form errors. These will be duplicates of the individual field errors. Errors []Error `json:"errors,omitempty"` } // NewHTMLForm returns an empty container. func NewHTMLForm(action string) *HTMLForm { return &HTMLForm{ Action: action, Method: "POST", Fields: Fields{}, } } // NewHTMLFormFromRequestBody creates a new HTMLForm and populates fields by parsing the HTTP Request body. // A jsonSchemaRef needs to be added to allow HTTP Form Post Body parsing. func NewHTMLFormFromRequestBody(r *http.Request, action string, compiler decoderx.HTTPDecoderOption) (*HTMLForm, error) { c := NewHTMLForm(action) raw := json.RawMessage(`{}`) if err := decoder.Decode(r, &raw, compiler, decoderx.HTTPDecoderSetIgnoreParseErrorsStrategy(decoderx.ParseErrorIgnore), ); err != nil { if err := c.ParseError(err); err != nil { return nil, err } } for k, v := range jsonx.Flatten(raw) { c.SetValue(k, v) } return c, nil } // NewHTMLFormFromJSON creates a HTML form based on the provided JSON struct. func NewHTMLFormFromJSON(action string, raw json.RawMessage, prefix string) *HTMLForm { c := NewHTMLForm(action) c.SetValuesFromJSON(raw, prefix) return c } // NewHTMLFormFromJSONSchema creates a new HTMLForm and populates the fields // using the provided JSON Schema. func NewHTMLFormFromJSONSchema(action, jsonSchemaRef, prefix string, compiler *jsonschema.Compiler) (*HTMLForm, error) { paths, err := jsonschemax.ListPaths(jsonSchemaRef, compiler) if err != nil { return nil, err } c := NewHTMLForm(action) for _, value := range paths { name := addPrefix(value.Name, prefix, ".") c.Fields = append(c.Fields, fieldFromPath(name, value)) } return c, nil } func (c *HTMLForm) SortFields(schemaRef, prefix string) error { sortFunc, err := c.Fields.sortBySchema(schemaRef, prefix) if err != nil { return err } sort.SliceStable(c.Fields, sortFunc) return nil } // Reset resets the container's errors as well as each field's value and errors. func (c *HTMLForm) ResetErrors(exclude ...string) { c.defaults() c.Lock() defer c.Unlock() c.Errors = nil for k, f := range c.Fields { if !stringslice.Has(exclude, f.Name) { f.Errors = nil } c.Fields[k] = f }
// Reset resets the container's errors as well as each field's value and errors. func (c *HTMLForm) Reset(exclude ...string) { c.defaults() c.Lock() defer c.Unlock() c.Errors = nil for k, f := range c.Fields { if !stringslice.Has(exclude, f.Name) { f.Reset() } c.Fields[k] = f } } // ParseError type asserts the given error and sets the container's errors or a // field's errors and if the error is not something to be handled by the // form container, the error is returned. // // This method DOES NOT touch the values of the form fields, only its errors. func (c *HTMLForm) ParseError(err error) error { c.defaults() switch e := errorsx.Cause(err).(type) { case richError: if e.StatusCode() == http.StatusBadRequest { c.AddError(&Error{Message: e.Reason()}) return nil } return err case *jsonschema.ValidationError: for _, err := range append([]*jsonschema.ValidationError{e}, e.Causes...) { pointer, _ := jsonschemax.JSONPointerToDotNotation(err.InstancePtr) if err.Context == nil { // The pointer can be ignored because if there is an error, we'll just use // the empty field (global error). c.AddError(&Error{Message: err.Message}, pointer) continue } switch ctx := err.Context.(type) { case *jsonschema.ValidationErrorContextRequired: for _, required := range ctx.Missing { // The pointer can be ignored because if there is an error, we'll just use // the empty field (global error). pointer, _ := jsonschemax.JSONPointerToDotNotation(required) c.AddError(&Error{Message: err.Message}, pointer) } default: c.AddError(&Error{Message: err.Message}, pointer) continue } } return nil } return err } // SetValues sets the container's fields to the provided values. func (c *HTMLForm) SetValues(values map[string]interface{}) { c.defaults() for k, v := range values { c.SetValue(k, v) } } // SetValuesFromJSON sets the container's fields to the provided values. func (c *HTMLForm) SetValuesFromJSON(raw json.RawMessage, prefix string) { c.defaults() for k, v := range jsonx.Flatten(raw) { if prefix != "" { k = prefix + "." + k } c.SetValue(k, v) } } // getField returns a pointer to the field with the given name. func (c *HTMLForm) getField(name string) *Field { // to prevent blocks we don't use c.defaults() here if c.Fields == nil { return nil } for i := range c.Fields { if c.Fields[i].Name == name { return &c.Fields[i] } } return nil } // SetRequired sets the container's fields required. func (c *HTMLForm) SetRequired(fields ...string) { c.defaults() c.Lock() defer c.Unlock() for _, field := range fields { if f := c.getField(field); f != nil { f.Required = true } } } // Unset removes a field from the container. func (c *HTMLForm) UnsetField(name string) { c.defaults() c.Lock() defer c.Unlock() for i := range c.Fields { if c.Fields[i].Name == name { c.Fields = append(c.Fields[:i], c.Fields[i+1:]...) return } } } // SetCSRF sets the CSRF value using e.g. nosurf.Token(r). func (c *HTMLForm) SetCSRF(token string) { c.SetField(Field{ Name: CSRFTokenName, Type: "hidden", Required: true, Value: token, }) } // SetField sets a field. func (c *HTMLForm) SetField(field Field) { c.defaults() c.Lock() defer c.Unlock() for i := range c.Fields { if c.Fields[i].Name == field.Name { c.Fields[i] = field return } } c.Fields = append(c.Fields, field) } // SetValue sets a container's field to the provided name and value. 
func (c *HTMLForm) SetValue(name string, value interface{}) { c.defaults() c.Lock() defer c.Unlock() if f := c.getField(name); f != nil { f.Value = value f.Type = toFormType(name, value) return } c.Fields = append(c.Fields, Field{ Name: name, Value: value, Type: toFormType(name, value), }) } // AddError adds the provided error, and if a non-empty names list is set, // adds the error on the corresponding field. func (c *HTMLForm) AddError(err *Error, names ...string) { c.defaults() c.Lock() defer c.Unlock() if len(stringslice.TrimSpaceEmptyFilter(names)) == 0 { c.Errors = append(c.Errors, *err) return } for _, name := range names { if ff := c.getField(name); ff != nil { ff.Errors = append(ff.Errors, *err) continue } c.Fields = append(c.Fields, Field{ Name: name, Errors: []Error{*err}, }) } } func (c *HTMLForm) Scan(value interface{}) error { return sqlxx.JSONScan(c, value) } func (c *HTMLForm) Value() (driver.Value, error) { return sqlxx.JSONValue(c) } func (c *HTMLForm) defaults() { c.Lock() defer c.Unlock() if c.Fields == nil { c.Fields = Fields{} } }
}
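// --- Editor's illustrative sketch (standalone, not ory/x code) ---
// SetValuesFromJSON above relies on flattening a JSON document into dot-notation keys
// before calling SetValue for each leaf. This is a toy version of that flattening step;
// ory/x's jsonx.Flatten also handles arrays and other edge cases not covered here.
package main

import (
	"encoding/json"
	"fmt"
)

func flatten(prefix string, value interface{}, out map[string]interface{}) {
	if obj, ok := value.(map[string]interface{}); ok {
		for k, v := range obj {
			key := k
			if prefix != "" {
				key = prefix + "." + k
			}
			flatten(key, v, out)
		}
		return
	}
	out[prefix] = value
}

func main() {
	raw := []byte(`{"traits": {"email": "foo@bar.com", "name": {"first": "Foo"}}}`)
	var doc map[string]interface{}
	if err := json.Unmarshal(raw, &doc); err != nil {
		panic(err)
	}
	out := map[string]interface{}{}
	flatten("", doc, out)
	fmt.Println(out) // keys: traits.email, traits.name.first
}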
hashrouting.py
# -*- coding: utf-8 -*- """Implementations of all hash-routing strategies""" from __future__ import division import networkx as nx from collections import Counter from icarus.registry import register_strategy from icarus.util import inheritdoc, multicast_tree, path_links from icarus.scenarios.algorithms import extract_cluster_level_topology from .base import Strategy # TODO: THIS IS THE MAIN PLACE TO IMPLEMENT THE STORAGE # ASSIGNMENT STRATEGY!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! __all__ = [ 'Hashrouting', 'HashroutingEdge', 'HashroutingOnPath', 'HashroutingClustered', 'HashroutingSymmetric', 'HashroutingAsymmetric', 'HashroutingMulticast', 'HashroutingHybridAM', 'HashroutingHybridSM', ] class BaseHashrouting(Strategy): """Base class for all hash-routing implementations.""" @inheritdoc(Strategy) def __init__(self, view, controller, **kwargs): super(BaseHashrouting, self).__init__(view, controller) self.cache_nodes = view.cache_nodes() self.storage_nodes = view.storage_nodes() self.n_cache_nodes = len(self.cache_nodes) # Allocate results of hash function to caching nodes self.cache_assignment = {i: self.cache_nodes[i] for i in range(len(self.cache_nodes))} # Check if there are clusters if 'clusters' in self.view.topology().graph: self.clusters = self.view.topology().graph['clusters'] # Convert to list in case it comes as set or iterable for i, cluster in enumerate(self.clusters): self.clusters[i] = list(cluster) self.cluster_size = {i: len(self.clusters[i]) for i in range(len(self.clusters))} def authoritative_cache(self, content, cluster=None): """Return the authoritative cache node for the given content Parameters ---------- content : any hashable type The identifier of the content cluster : int, optional If the topology is divided in clusters, then return the authoritative cache responsible for the content in the specified cluster Returns ------- authoritative_cache : any hashable type The node on which the authoritative cache is deployed """ # TODO: consider using a better non-cryptographic hash function, like xxhash; # may also need to do something to include "tags/labels" instead h = hash(content) if cluster is not None: return self.clusters[cluster][h % self.cluster_size[cluster]] return self.cache_assignment[h % self.n_cache_nodes] def process_event(self, time, receiver, content, log): raise NotImplementedError('Cannot use BaseHashrouting class as is. ' 'This class is meant to be extended by other classes.') def authoritative_storage(self, labels_sources, content = 0): """Return the authoritative storage node for the given labels Parameters ---------- labels_sources : iterable of (node, count) pairs The storage nodes holding the requested labels and the number of matching labels held by each node content : any hashable type, optional The identifier of the content (currently unused) Returns ------- authoritative_storage : any hashable type The node on which the authoritative storage is deployed """ # TODO: consider using a better non-cryptographic hash function, like xxhash; # may also need to do something to include "tags/labels" instead # Pick the storage node holding the largest number of matching labels current_count = 0 auth_node = None for n, count in labels_sources: if count >= current_count: auth_node = self.storage_nodes[n] current_count = count return auth_node # TODO: Need to understand and get this sorted! @register_strategy('HASHROUTING') class Hashrouting(BaseHashrouting): """Unified implementation of the three basic hash-routing schemes: symmetric, asymmetric and multicast. 
Hash-routing implementations are described in [1]_. According to these strategies, edge nodes receiving a content request compute a hash function mapping the content identifier to a specific caching node and forward the request to that specific node. If the cache holds the requested content, it is returned to the user, otherwise it is forwarded to the original source. Similarly, when a content is delivered to the requesting user, it can be cached only by the caching node associated to the content identifier by the hash function. References ---------- .. [1] L. Saino, I. Psaras and G. Pavlou, Hash-routing Schemes for Information-Centric Networking, in Proceedings of ACM SIGCOMM ICN'13 workshop. Available: https://lorenzosaino.github.io/publications/hashrouting-icn13.pdf .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. thesis University College London, Dec. 2015. Available: http://discovery.ucl.ac.uk/1473436/ """ def __init__(self, view, controller, routing, **kwargs): """Constructor Parameters ---------- view : NetworkView An instance of the network view controller : NetworkController An instance of the network controller routing : str (SYMM | ASYMM | MULTICAST) Content routing option """ super(Hashrouting, self).__init__(view, controller) self.routing = routing @inheritdoc(Strategy) def process_event(self, time, receiver, content, log): # get all required data source = self.view.content_source(content) cache = self.authoritative_cache(content) # handle (and log if required) actual request self.controller.start_session(time, receiver, content, log) # Forward request to authoritative cache self.controller.forward_request_path(receiver, cache) if self.controller.get_content(cache): # We have a cache hit here self.controller.forward_content_path(cache, receiver) else: # Cache miss: go all the way to source self.controller.forward_request_path(cache, source) if not self.controller.get_content(source): raise RuntimeError('The content is not found the expected source') if self.routing == 'SYMM': self.controller.forward_content_path(source, cache) # Insert in cache self.controller.put_content(cache) # Forward to receiver self.controller.forward_content_path(cache, receiver) elif self.routing == 'ASYMM': if cache in self.view.shortest_path(source, receiver): # Forward to cache self.controller.forward_content_path(source, cache) # Insert in cache self.controller.put_content(cache) # Forward to receiver self.controller.forward_content_path(cache, receiver) else: # Forward to receiver straight away self.controller.forward_content_path(source, receiver) elif self.routing == 'MULTICAST': if cache in self.view.shortest_path(source, receiver): self.controller.forward_content_path(source, cache) # Insert in cache self.controller.put_content(cache) # Forward to receiver self.controller.forward_content_path(cache, receiver) else: # Multicast cache_path = self.view.shortest_path(source, cache) recv_path = self.view.shortest_path(source, receiver) # find what is the node that has to fork the content flow for i in range(1, min([len(cache_path), len(recv_path)])): if cache_path[i] != recv_path[i]: fork_node = cache_path[i - 1] break else: fork_node = cache self.controller.forward_content_path(source, fork_node) self.controller.forward_content_path(fork_node, receiver) self.controller.forward_content_path(fork_node, cache, main_path=False) self.controller.put_content(cache) else: raise ValueError("Routing %s not supported" % self.routing) self.controller.end_session() 
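# --- Editor's illustrative sketch (standalone helper, not part of Icarus) ---
# The core of every strategy above is the stable mapping content -> cache node computed
# in authoritative_cache(). This is a minimal standalone model of that mapping, using
# hashlib instead of the built-in hash() so the assignment is stable across interpreter
# runs (the TODO in BaseHashrouting hints at swapping the hash function anyway).
import hashlib


def toy_authoritative_cache(content, cache_nodes):
    """Deterministically map a content identifier to one of `cache_nodes`."""
    digest = int(hashlib.md5(str(content).encode()).hexdigest(), 16)
    return cache_nodes[digest % len(cache_nodes)]


# toy_authoritative_cache('video/42', ['c1', 'c2', 'c3']) always returns the same node
# for the same identifier, so every edge router agrees on where to forward a request
# without any coordination.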
@register_strategy('REPO_HASHROUTING') class Hashrouting(BaseHashrouting): """Unified implementation of the three basic hash-routing schemes: symmetric, asymmetric and multicast. Hash-routing implementations are described in [1]_. According to these strategies, edge nodes receiving a content request compute a hash function mapping the content identifier to a specific caching node and forward the request to that specific node. If the cache holds the requested content, it is returned to the user, otherwise it is forwarded to the original source. Similarly, when a content is delivered to the requesting user, it can be cached only by the caching node associated to the content identifier by the hash function. TODO: For Repo, label routing, this routing should also check for labels, if the request has labels and how many minimum labels are needed for each request References ---------- .. [1] L. Saino, I. Psaras and G. Pavlou, Hash-routing Schemes for Information-Centric Networking, in Proceedings of ACM SIGCOMM ICN'13 workshop. Available: https://lorenzosaino.github.io/publications/hashrouting-icn13.pdf .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. thesis University College London, Dec. 2015. Available: http://discovery.ucl.ac.uk/1473436/ """ def __init__(self, view, controller, routing, **kwargs): """Constructor Parameters ---------- view : NetworkView An instance of the network view controller : NetworkController An instance of the network controller routing : str (SYMM | ASYMM | MULTICAST) Content routing option """ super(Hashrouting, self).__init__(view, controller) self.routing = routing @inheritdoc(Strategy) def process_event(self, time, receiver, content, labels, log): # TODO: Maybe add deadline here...? # (esp. for repo strategies) # TODO: This is where data is fetched from and/or # added to a cache AND NEEDS TO BE ADAPTED TO BE # ADDED TO THE APPROPRIATE REPOSITORY, DEPENDING # ON SERVICES/LABELS SERVED, AS WELL! # Check the "self.routing" options, below!!! # get all required data source = self.view.content_source(content) labels_sources = self.view.labels_sources(labels) cache = self.authoritative_cache(content) storage = self.authoritative_storage(labels, labels_sources) # handle (and log if required) actual request self.controller.start_session(time, receiver, content, log) # Forward request to authoritative cache self.controller.forward_request_path(receiver, cache) if self.controller.get_content(cache): # We have a cache hit here self.controller.forward_content_path(cache, receiver) elif storage is not None and self.controller.has_message(content, labels, storage): self.controller.forward_request_path(cache, storage) self.controller.forward_repo_content_path(storage, receiver) else: # Cache miss: go all the way to source self.controller.forward_request_path(cache, source) if not self.controller.get_content(source): raise RuntimeError('The content is not found the expected source') # TODO: REVISE THESE AND ADD MAIN STORAGE STRATEGIES (AFTER)!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
if self.routing == 'SYMM': self.controller.forward_content_path(source, cache) # Insert in cache self.controller.put_content(cache) # Forward to receiver self.controller.forward_content_path(cache, receiver) elif self.routing == 'ASYMM': if cache in self.view.shortest_path(source, receiver): # Forward to cache self.controller.forward_content_path(source, cache) # Insert in cache self.controller.put_content(cache) # Forward to receiver self.controller.forward_content_path(cache, receiver) else: # Forward to receiver straight away self.controller.forward_content_path(source, receiver) elif self.routing == 'MULTICAST': if cache in self.view.shortest_path(source, receiver): self.controller.forward_content_path(source, cache) # Insert in cache self.controller.put_content(cache) # Forward to receiver self.controller.forward_content_path(cache, receiver) else: # Multicast cache_path = self.view.shortest_path(source, cache) recv_path = self.view.shortest_path(source, receiver) # find what is the node that has to fork the content flow for i in range(1, min([len(cache_path), len(recv_path)])): if cache_path[i] != recv_path[i]: fork_node = cache_path[i - 1] break else: fork_node = cache self.controller.forward_content_path(source, fork_node) self.controller.forward_content_path(fork_node, receiver) self.controller.forward_content_path(fork_node, cache, main_path=False) self.controller.put_content(cache) else: raise ValueError("Routing %s not supported" % self.routing) self.controller.end_session() @register_strategy('HR_EDGE_CACHE') class HashroutingEdge(BaseHashrouting): """Hybrid hash-routing and edge caching. According to this strategy a fraction of the caching space in each cache is reserved for local caching. When a request is issued by a user, it is routed to the closes caching node and this caching node holds a copy of requested content in its local cache even if not authoritative for the requested content. Here we assume that each receiver is directly connected to one gateway, which is on the path to all other caches. References ---------- .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. thesis University College London, Dec. 2015. Available: http://discovery.ucl.ac.uk/1473436/ """ def __init__(self, view, controller, routing, edge_cache_ratio, **kwargs): """Constructor Parameters ---------- view : NetworkView An instance of the network view controller : NetworkController An instance of the network controller routing : str Content routing scheme: SYMM, ASYMM or MULTICAST edge_cache_ratio : float [0, 1] Ratio of cache allocated to uncoordinated edge cache """ if edge_cache_ratio < 0 or edge_cache_ratio > 1: raise ValueError('edge_cache_ratio must be between 0 and 1') super(HashroutingEdge, self).__init__(view, controller) self.routing = routing self.controller.reserve_local_cache(edge_cache_ratio) self.proxy = {v: list(self.view.topology().edge[v].keys())[0] for v in self.view.topology().receivers()} if any(v not in self.view.topology().cache_nodes() for v in self.proxy.values()): raise ValueError('There are receivers connected to a proxy without cache') @inheritdoc(Strategy) def process_event(self, time, receiver, content, log): # get all required data
@register_strategy('HR_ON_PATH') class HashroutingOnPath(BaseHashrouting): """Hybrid hash-routing and on-path caching. This strategy differs from HashroutingEdge for the fact that in HashroutingEdge, the local fraction of the cache is queried only by traffic of endpoints directly attached to the caching node. In HashroutingOnPath the local cache is queried by all traffic being forwarded by the node. References ---------- .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. thesis University College London, Dec. 2015. Available: http://discovery.ucl.ac.uk/1473436/ """ def __init__(self, view, controller, routing, on_path_cache_ratio, **kwargs): """Constructor Parameters ---------- view : NetworkView An instance of the network view controller : NetworkController An instance of the network controller routing : str Content routing scheme: SYMM, ASYMM or MULTICAST on_path_cache_ratio : float [0, 1] Ratio of cache allocated to uncoordinated on-path cache """ if on_path_cache_ratio < 0 or on_path_cache_ratio > 1: raise ValueError('on_path_cache_ratio must be between 0 and 1') super(HashroutingOnPath, self).__init__(view, controller) self.routing = routing self.controller.reserve_local_cache(on_path_cache_ratio) @inheritdoc(Strategy) def process_event(self, time, receiver, content, log): # get all required data source = self.view.content_source(content) cache = self.authoritative_cache(content) # handle (and log if required) actual request self.controller.start_session(time, receiver, content, log) # Forward request to authoritative cache and check all local caches on path path = self.view.shortest_path(receiver, cache) for u, v in path_links(path): self.controller.forward_request_hop(u, v) if v != cache: if self.controller.get_content_local_cache(v): serving_node = v direct_return = True break else: # No cache hits from local caches on path, query authoritative cache if self.controller.get_content(cache): serving_node = v direct_return = True else: path = self.view.shortest_path(cache, source) for u, v in path_links(path): self.controller.forward_request_hop(u, v) if v != source: if self.controller.get_content_local_cache(v): serving_node = v direct_return = False break else: # No hits from local caches in cache -> source path # Get content from the source self.controller.get_content(source) serving_node = source direct_return = False # Now we have a serving node, let's return the content, while storing # it on all opportunistic caches on the path if direct_return: # Here I just need to return the content directly to the user path = list(reversed(self.view.shortest_path(receiver, serving_node))) for u, v in path_links(path): self.controller.forward_content_hop(u, v) if v != receiver: self.controller.put_content_local_cache(v) self.controller.end_session() return # Here I need to see whether I need symm, asymm or multicast delivery if self.routing == 'SYMM': links = path_links(list(reversed(self.view.shortest_path(cache, serving_node)))) + \ path_links(list(reversed(self.view.shortest_path(receiver, cache)))) for u, v in links: self.controller.forward_content_hop(u, v) if v == cache: self.controller.put_content(v) else: self.controller.put_content_local_cache(v) elif self.routing == 'ASYMM': path = list(reversed(self.view.shortest_path(receiver, serving_node))) for u, v in path_links(path): self.controller.forward_content_hop(u, v) if v == cache: self.controller.put_content(v) else: self.controller.put_content_local_cache(v) elif self.routing == 'MULTICAST': main_path = 
set(path_links(self.view.shortest_path(serving_node, receiver))) mcast_tree = multicast_tree(self.view.all_pairs_shortest_paths(), serving_node, [receiver, cache]) cache_branch = mcast_tree.difference(main_path) for u, v in cache_branch: self.controller.forward_content_hop(u, v, main_path=False) if v == cache: self.controller.put_content(v) else: self.controller.put_content_local_cache(v) for u, v in main_path: self.controller.forward_content_hop(u, v, main_path=True) if v == cache: self.controller.put_content(v) else: self.controller.put_content_local_cache(v) else: raise ValueError("Routing %s not supported" % self.routing) self.controller.end_session() @register_strategy('HR_CLUSTER') class HashroutingClustered(BaseHashrouting): """Hash-routing with clustering of the network. According to ths strategy, nodes of the network are divided in a number of clusters and hash-routing is used withing each of this clusters. In case of cache miss at a cluster, requests are forwarded to other clusters on the path to the original source. References ---------- .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. thesis University College London, Dec. 2015. Available: http://discovery.ucl.ac.uk/1473436/ """ def __init__(self, view, controller, intra_routing, inter_routing='LCE', **kwargs): """Constructor Parameters ---------- view : NetworkView An instance of the network view controller : NetworkController An instance of the network controller intra_routing : str Intra-cluster content routing scheme: SYMM, ASYMM or MULTICAST inter_routing : str Inter-cluster content routing scheme. Only supported LCE """ super(HashroutingClustered, self).__init__(view, controller) if intra_routing not in ('SYMM', 'ASYMM', 'MULTICAST'): raise ValueError('Intra-cluster routing policy %s not supported' % intra_routing) self.intra_routing = intra_routing self.inter_routing = inter_routing self.cluster_topology = extract_cluster_level_topology(view.topology()) self.cluster_sp = nx.all_pairs_shortest_path(self.cluster_topology) @inheritdoc(Strategy) def process_event(self, time, receiver, content, log): # get all required data source = self.view.content_source(content) # handle (and log if required) actual request self.controller.start_session(time, receiver, content, log) receiver_cluster = self.view.cluster(receiver) source_cluster = self.view.cluster(source) cluster_path = self.cluster_sp[receiver_cluster][source_cluster] if self.inter_routing == 'LCE': start = receiver for cluster in cluster_path: cache = self.authoritative_cache(content, cluster) # Forward request to authoritative cache self.controller.forward_request_path(start, cache) start = cache if self.controller.get_content(cache): break else: # Loop was never broken, cache miss self.controller.forward_request_path(start, source) start = source if not self.controller.get_content(source): raise RuntimeError('The content is not found the expected source') elif self.inter_routing == 'EDGE': cache = self.authoritative_cache(content, receiver_cluster) self.controller.forward_request_path(receiver, cache) if self.controller.get_content(cache): self.controller.forward_content_path(cache, receiver) self.controller.end_session() return else: self.controller.forward_request_path(cache, source) self.controller.get_content(source) cluster = source_cluster start = source # Now "start" is the node that is serving the content cluster_path = list(reversed(self.cluster_sp[receiver_cluster][cluster])) if self.inter_routing == 'LCE': if self.intra_routing == 'SYMM': 
for cluster in cluster_path: cache = self.authoritative_cache(content, cluster) # Forward request to authoritative cache self.controller.forward_content_path(start, cache) self.controller.put_content(cache) start = cache self.controller.forward_content_path(start, receiver) elif self.intra_routing == 'ASYMM': self.controller.forward_content_path(start, receiver) path = self.view.shortest_path(start, receiver) traversed_clusters = set(self.view.cluster(v) for v in path) authoritative_caches = set(self.authoritative_cache(content, cluster) for cluster in traversed_clusters) traversed_caches = authoritative_caches.intersection(set(path)) for v in traversed_caches: self.controller.put_content(v) elif self.intra_routing == 'MULTICAST': destinations = [self.authoritative_cache(content, cluster) for cluster in cluster_path] for v in destinations: self.controller.put_content(v) main_path = set(path_links(self.view.shortest_path(start, receiver))) mcast_tree = multicast_tree(self.view.all_pairs_shortest_paths(), start, destinations) mcast_tree = mcast_tree.difference(main_path) for u, v in mcast_tree: self.controller.forward_content_hop(u, v, main_path=False) for u, v in main_path: self.controller.forward_content_hop(u, v, main_path=True) else: raise ValueError("Intra-cluster routing %s not supported" % self.intra_routing) elif self.inter_routing == 'EDGE': if self.intra_routing == 'SYMM': cache = self.authoritative_cache(content, cluster_path[-1]) self.controller.forward_content_path(start, cache) self.controller.forward_content_path(cache, receiver) path = self.view.shortest_path(start, receiver) traversed_clusters = set(self.view.cluster(v) for v in path) authoritative_caches = set(self.authoritative_cache(content, cluster) for cluster in traversed_clusters) traversed_caches = authoritative_caches.intersection(set(path)) for v in traversed_caches: self.controller.put_content(v) if cache not in traversed_caches: self.controller.put_content(cache) elif self.intra_routing == 'ASYMM': self.controller.forward_content_path(start, receiver) path = self.view.shortest_path(start, receiver) traversed_clusters = set(self.view.cluster(v) for v in path) authoritative_caches = set(self.authoritative_cache(content, cluster) for cluster in traversed_clusters) traversed_caches = authoritative_caches.intersection(set(path)) for v in traversed_caches: self.controller.put_content(v) elif self.intra_routing == 'MULTICAST': cache = self.authoritative_cache(content, cluster_path[-1]) self.controller.put_content(cache) main_path = set(path_links(self.view.shortest_path(start, receiver))) mcast_tree = multicast_tree(self.view.all_pairs_shortest_paths(), start, [cache]) mcast_tree = mcast_tree.difference(main_path) for u, v in mcast_tree: self.controller.forward_content_hop(u, v, main_path=False) for u, v in main_path: self.controller.forward_content_hop(u, v, main_path=True) else: raise ValueError("Inter-cluster routing %s not supported" % self.inter_routing) self.controller.end_session() @register_strategy('HR_SYMM') class HashroutingSymmetric(Hashrouting): """Hash-routing with symmetric routing (HR SYMM) According to this strategy, each content is routed following the same path of the request. References ---------- .. [1] L. Saino, I. Psaras and G. Pavlou, Hash-routing Schemes for Information-Centric Networking, in Proceedings of ACM SIGCOMM ICN'13 workshop. Available: https://lorenzosaino.github.io/publications/hashrouting-icn13.pdf .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. 
thesis University College London, Dec. 2015. Available: http://discovery.ucl.ac.uk/1473436/ """ @inheritdoc(Strategy) def __init__(self, view, controller, **kwargs): super(HashroutingSymmetric, self).__init__(view, controller, 'SYMM', **kwargs) @register_strategy('HR_ASYMM') class HashroutingAsymmetric(Hashrouting): """Hash-routing with asymmetric routing (HR ASYMM) According to this strategy, each content fetched from an original source, as a result of a cache miss, is routed towards the receiver following the shortest path. If the authoritative cache is on the path, then it caches the content, otherwise not. References ---------- .. [1] L. Saino, I. Psaras and G. Pavlou, Hash-routing Schemes for Information-Centric Networking, in Proceedings of ACM SIGCOMM ICN'13 workshop. Available: https://lorenzosaino.github.io/publications/hashrouting-icn13.pdf .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. thesis University College London, Dec. 2015. Available: http://discovery.ucl.ac.uk/1473436/ """ @inheritdoc(Strategy) def __init__(self, view, controller, **kwargs): super(HashroutingAsymmetric, self).__init__(view, controller, 'ASYMM', **kwargs) @register_strategy('HR_MULTICAST') class HashroutingMulticast(Hashrouting): """Hash-routing implementation with multicast delivery of content packets. In this strategy, if there is a cache miss, when contents return in the domain, they are multicast. One copy is sent to the authoritative cache and the other to the receiver. If the cache is on the path from source to receiver, this strategy behaves as a normal symmetric hash-routing strategy. References ---------- .. [1] L. Saino, I. Psaras and G. Pavlou, Hash-routing Schemes for Information-Centric Networking, in Proceedings of ACM SIGCOMM ICN'13 workshop. Available: https://lorenzosaino.github.io/publications/hashrouting-icn13.pdf .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. thesis University College London, Dec. 2015. Available: http://discovery.ucl.ac.uk/1473436/ """ @inheritdoc(Strategy) def __init__(self, view, controller, **kwargs): super(HashroutingMulticast, self).__init__(view, controller, 'MULTICAST', **kwargs) @register_strategy('HR_HYBRID_AM') class HashroutingHybridAM(BaseHashrouting): """Hash-routing implementation with hybrid asymmetric-multicast delivery of content packets. In this strategy, if there is a cache miss, when content packets return in the domain, the packet is delivered to the receiver following the shortest path. If the additional number of hops required to send a copy to the authoritative cache is below a specific fraction of the network diameter, then one copy is sent to the authoritative cache as well. If the cache is on the path from source to receiver, this strategy behaves as a normal symmetric hash-routing strategy. References ---------- .. [1] L. Saino, I. Psaras and G. Pavlou, Hash-routing Schemes for Information-Centric Networking, in Proceedings of ACM SIGCOMM ICN'13 workshop. Available: https://lorenzosaino.github.io/publications/hashrouting-icn13.pdf """ def __init__(self, view, controller, max_stretch=0.2, **kwargs): """Constructor Parameters ---------- view : NetworkView An instance of the network view controller : NetworkController An instance of the network controller max_stretch : float, optional The threshold path stretch (normalized by network diameter) set to decide whether using asymmetric or multicast routing. 
If the path stretch required to deliver a content is above max_stretch asymmetric delivery is used, otherwise multicast delivery is used. """ super(HashroutingHybridAM, self).__init__(view, controller) self.max_stretch = nx.diameter(view.topology()) * max_stretch @inheritdoc(Strategy) def process_event(self, time, receiver, content, log): # get all required data source = self.view.content_source(content) cache = self.authoritative_cache(content) # handle (and log if required) actual request self.controller.start_session(time, receiver, content, log) # Forward request to authoritative cache self.controller.forward_request_path(receiver, cache) if self.controller.get_content(cache): # We have a cache hit here self.controller.forward_content_path(cache, receiver) else: # Cache miss: go all the way to source self.controller.forward_request_path(cache, source) if not self.controller.get_content(source): raise RuntimeError('The content was not found at the expected source') if cache in self.view.shortest_path(source, receiver): # Forward to cache self.controller.forward_content_path(source, cache) # Insert in cache self.controller.put_content(cache) # Forward to receiver self.controller.forward_content_path(cache, receiver) else: # Multicast cache_path = self.view.shortest_path(source, cache) recv_path = self.view.shortest_path(source, receiver) # find what is the node that has to fork the content flow for i in range(1, min([len(cache_path), len(recv_path)])): if cache_path[i] != recv_path[i]: fork_node = cache_path[i - 1] break else: fork_node = cache self.controller.forward_content_path(source, receiver, main_path=True) # multicast to cache only if stretch is under threshold if len(self.view.shortest_path(fork_node, cache)) - 1 < self.max_stretch: self.controller.forward_content_path(fork_node, cache, main_path=False) self.controller.put_content(cache) self.controller.end_session() @register_strategy('HR_HYBRID_SM') class HashroutingHybridSM(BaseHashrouting): """Hash-routing implementation with hybrid symmetric-multicast delivery of content packets. In this implementation, the edge router receiving a content packet decides whether to deliver the packet using multicast or symmetric hash-routing based on the total cost for delivering the Data to both cache and receiver in terms of hops. References ---------- .. [1] L. Saino, I. Psaras and G. Pavlou, Hash-routing Schemes for Information-Centric Networking, in Proceedings of ACM SIGCOMM ICN'13 workshop. 
Available: https://lorenzosaino.github.io/publications/hashrouting-icn13.pdf """ @inheritdoc(Strategy) def __init__(self, view, controller, **kwargs): super(HashroutingHybridSM, self).__init__(view, controller) @inheritdoc(Strategy) def process_event(self, time, receiver, content, log): # get all required data source = self.view.content_source(content) cache = self.authoritative_cache(content) # handle (and log if required) actual request self.controller.start_session(time, receiver, content, log) # Forward request to authoritative cache self.controller.forward_request_path(receiver, cache) if self.controller.get_content(cache): # We have a cache hit here self.controller.forward_content_path(cache, receiver) else: # Cache miss: go all the way to source self.controller.forward_request_path(cache, source) if not self.controller.get_content(source): raise RuntimeError('The content is not found the expected source') if cache in self.view.shortest_path(source, receiver): self.controller.forward_content_path(source, cache) # Insert in cache self.controller.put_content(cache) # Forward to receiver self.controller.forward_content_path(cache, receiver) else: # Multicast cache_path = self.view.shortest_path(source, cache) recv_path = self.view.shortest_path(source, receiver) # find what is the node that has to fork the content flow for i in range(1, min([len(cache_path), len(recv_path)])): if cache_path[i] != recv_path[i]: fork_node = cache_path[i - 1] break else: fork_node = cache symmetric_path_len = len(self.view.shortest_path(source, cache)) + \ len(self.view.shortest_path(cache, receiver)) - 2 multicast_path_len = len(self.view.shortest_path(source, fork_node)) + \ len(self.view.shortest_path(fork_node, cache)) + \ len(self.view.shortest_path(fork_node, receiver)) - 3 self.controller.put_content(cache) # If symmetric and multicast have equal cost, choose symmetric # because of easier packet processing if symmetric_path_len <= multicast_path_len: # use symmetric delivery # Symmetric delivery self.controller.forward_content_path(source, cache, main_path=True) self.controller.forward_content_path(cache, receiver, main_path=True) else: # Multicast delivery self.controller.forward_content_path(source, receiver, main_path=True) self.controller.forward_content_path(fork_node, cache, main_path=False) self.controller.end_session()
source = self.view.content_source(content) cache = self.authoritative_cache(content) # handle (and log if required) actual request self.controller.start_session(time, receiver, content, log) proxy = self.proxy[receiver] self.controller.forward_request_hop(receiver, proxy) if proxy != cache: if self.controller.get_content_local_cache(proxy): self.controller.forward_content_hop(proxy, receiver) self.controller.end_session() return else: # Forward request to authoritative cache self.controller.forward_request_path(proxy, cache) if self.controller.get_content(cache): # We have a cache hit here self.controller.forward_content_path(cache, proxy) else: # Cache miss: go all the way to source self.controller.forward_request_path(cache, source) if not self.controller.get_content(source): raise RuntimeError('The content is not found the expected source') if self.routing == 'SYMM': self.controller.forward_content_path(source, cache) # Insert in cache self.controller.put_content(cache) # Forward to receiver self.controller.forward_content_path(cache, proxy) elif self.routing == 'ASYMM': if cache in self.view.shortest_path(source, proxy): # Forward to cache self.controller.forward_content_path(source, cache) # Insert in cache self.controller.put_content(cache) # Forward to receiver self.controller.forward_content_path(cache, proxy) else: # Forward to receiver straight away self.controller.forward_content_path(source, proxy) elif self.routing == 'MULTICAST': if cache in self.view.shortest_path(source, proxy): self.controller.forward_content_path(source, cache) # Insert in cache self.controller.put_content(cache) # Forward to receiver self.controller.forward_content_path(cache, receiver) else: # Multicast cache_path = self.view.shortest_path(source, cache) recv_path = self.view.shortest_path(source, proxy) # find what is the node that has to fork the content flow for i in range(1, min([len(cache_path), len(recv_path)])): if cache_path[i] != recv_path[i]: fork_node = cache_path[i - 1] break else: fork_node = cache self.controller.forward_content_path(source, fork_node) self.controller.forward_content_path(fork_node, proxy) self.controller.forward_content_path(fork_node, cache, main_path=False) self.controller.put_content(cache) else: raise ValueError("Routing %s not recognized" % self.routing) if proxy != cache: self.controller.put_content_local_cache(proxy) self.controller.forward_content_hop(proxy, receiver) self.controller.end_session()
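The hash-routing strategies above all rely on self.authoritative_cache(content), which, per the papers cited in their docstrings, maps each content identifier to a single responsible cache node via a hash function. The following is only a minimal standalone sketch of that mapping, assuming a plain list of cache node IDs; the real strategies obtain caches and clusters from the simulator's NetworkView/BaseHashrouting machinery instead.

import hashlib

def authoritative_cache(content_id, cache_nodes):
    # Deterministically map a content ID onto one of the available caches.
    digest = hashlib.sha1(str(content_id).encode()).hexdigest()
    return cache_nodes[int(digest, 16) % len(cache_nodes)]

# Example: the same content always hashes to the same cache node.
caches = [4, 7, 11, 13]
assert authoritative_cache(42, caches) == authoritative_cache(42, caches)

Because the mapping is a pure function of the content ID, every router can compute it locally, which is what lets requests and content packets be steered to the authoritative cache without any coordination state.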
machine.rs
use crate::program::Program; use crate::vm::callbacks::Callbacks; use crate::vm::cpu::alu; use crate::vm::cpu::processor::Processor; use crate::vm::cpu::state::State; use crate::vm::ram::Memory; pub struct Machine { pub cpu: Processor, pub ram: Memory, run: bool, } impl Machine { pub fn new() -> Machine { Machine { cpu: Processor::new(), ram: Memory::new(), run: false, } } pub fn load_at(&mut self, program: &Program, start_address: u16) -> bool { let end = start_address as u32 + program.raw().len() as u32; let will_fit = end <= 65536; let mut address = start_address; if will_fit { for value in program.raw() { self.ram.write_u8(address, *value); address = address.wrapping_add(1); } } will_fit } pub fn load(&mut self, program: &Program) -> bool { self.load_at(program, 0) } pub fn start_at(&mut self, address: u16) { self.start_with_options(address, &mut Callbacks::new()) } pub fn start(&mut self) { self.start_at(0); } pub fn start_with_options(&mut self, address: u16, callbacks: &mut Callbacks) { self.cpu.goto(address); self.run = true; while self.run { if self.cpu.is_halted() { self.nop(); } else { self.execute_with(callbacks); } } } pub fn stop(&mut self) { self.run = false; } pub fn get_register<T>(&self, selector: fn(&State) -> T) -> T
pub fn get_register_mut<T: Copy + Clone>(&mut self, selector: fn(&mut State) -> &mut T) -> T { *selector(&mut self.cpu.state) } pub fn get_register_pair(&self, selector: fn(&State) -> (u8, u8)) -> u16 { alu::get_word(self.get_register(selector)) } pub fn get_register_pair_mut(&mut self, selector: fn(&mut State) -> &mut (u8, u8)) -> u16 { alu::get_word(( selector(&mut self.cpu.state).0, selector(&mut self.cpu.state).1, )) } pub fn set_register<T>(&mut self, selector: fn(&mut State) -> &mut T, value: T) { *selector(&mut self.cpu.state) = value; } pub fn set_register_pair(&mut self, selector: fn(&mut State) -> &mut (u8, u8), value: u16) { *selector(&mut self.cpu.state) = alu::get_octets(value); } }
{ selector(&self.cpu.state) }
files.go
package files import ( "encoding/json" "io/ioutil" "net/url" "os" "os/user" "path/filepath" "strings" ) // ToAbsolutePath handles different kind of paths and makes an absolute path // out of them. Consider the following three inputs: // file:///home/marcel/test.txt%C3%A4 // ./test.txtä // ~/test.txtä // Those will be turned into (assuming that our current working directory // is /home/marcel: // /home/marcel/test.txtä // However, this method doesn't check for file existence. func ToAbsolutePath(input string) (string, error) { var resolvedPath string if strings.HasPrefix(input, "~") { currentUser, userResolveError := user.Current() if userResolveError != nil { return "", userResolveError } resolvedPath = filepath.Join(currentUser.HomeDir, strings.TrimPrefix(input, "~")) } else { resolvedPath = strings.TrimPrefix(input, "file://") var unescapeError error resolvedPath, unescapeError = url.PathUnescape(resolvedPath) if unescapeError != nil { return "", unescapeError } } resolvedPath, resolveError := filepath.Abs(resolvedPath) if resolveError != nil { return "", resolveError } return resolvedPath, nil } func GetAbsolutePath(directoryPath string) (string, error) { absolutePath, resolveError := ToAbsolutePath(directoryPath) if resolveError != nil { return "", resolveError } return absolutePath, resolveError } func Ens
rectoryPath string) error { _, statError := os.Stat(directoryPath) if os.IsNotExist(statError) { createDirsError := os.MkdirAll(directoryPath, 0766) if createDirsError != nil { return createDirsError } return nil } return statError } func CheckExists(path string) error { _, statError := os.Stat(path) return statError } func LoadJSON(path string, store interface{}) error { existsError := CheckExists(path) if existsError != nil { return existsError } file, readError := ioutil.ReadFile(path) if readError != nil { return readError } jsonError := json.Unmarshal([]byte(file), &store) if jsonError != nil { return jsonError } return nil } func WriteJSON(path string, store interface{}) error { jsonContents, jsonError := json.MarshalIndent(store, "", " ") if jsonError != nil { return jsonError } writeError := ioutil.WriteFile(path, jsonContents, 0644) if writeError != nil { return writeError } return nil }
ureDirectory(di
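The doc comment on ToAbsolutePath describes three input shapes (a file:// URL, a relative path, and a ~-prefixed path) that all normalize to one absolute path. As a cross-language illustration only (the package itself is Go), a rough Python re-sketch of that documented behaviour, making no claim about edge cases the Go code may handle differently:

import os
from urllib.parse import unquote

def to_absolute_path(path):
    if path.startswith("~"):
        # "~/..." is expanded against the user's home directory.
        path = os.path.expanduser(path)
    else:
        # "file://" URLs are stripped and percent-decoded.
        if path.startswith("file://"):
            path = path[len("file://"):]
        path = unquote(path)
    # Relative paths are resolved against the current working directory.
    return os.path.abspath(path)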
zfm-access-token-payload.ts
export class
{ username: string; role: string; sub: string; iat?: number; exp?: number }
AccessTokenPayload
last-merge-batches.py
#! /usr/bin/env python # Copyright 2010, 2011 Martin C. Frith import fileinput, itertools, optparse, os, signal, sys def
(lines): for line in lines: if line.startswith("# batch"): yield line else: print line, while True: yield None def lastMergeBatches(fileNames): files = map(fileinput.input, fileNames) b = map(batches, files) for i in itertools.izip(*b): j = filter(None, i) if j: print j[0], else: break if __name__ == "__main__": signal.signal(signal.SIGPIPE, signal.SIG_DFL) # avoid silly error message usage = "%prog files" description = "Read files of lastal output, merge corresponding batches, and write them." op = optparse.OptionParser(usage=usage, description=description) opts, args = op.parse_args() if not args: op.error("please give me some file names") try: lastMergeBatches(args) except KeyboardInterrupt: pass # avoid silly error message except Exception, e: prog = os.path.basename(sys.argv[0]) sys.exit(prog + ": error: " + str(e))
batches
odbc.py
""" Return data to an ODBC compliant server. This driver was developed with Microsoft SQL Server in mind, but theoretically could be used to return data to any compliant ODBC database as long as there is a working ODBC driver for it on your minion platform. :maintainer: C. R. Oldham ([email protected]) :maturity: New :depends: unixodbc, pyodbc, freetds (for SQL Server) :platform: all To enable this returner the minion will need On Linux: unixodbc (http://www.unixodbc.org) pyodbc (`pip install pyodbc`) The FreeTDS ODBC driver for SQL Server (http://www.freetds.org) or another compatible ODBC driver On Windows: TBD unixODBC and FreeTDS need to be configured via /etc/odbcinst.ini and /etc/odbc.ini. /etc/odbcinst.ini:: [TDS] Description=TDS Driver=/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so (Note the above Driver line needs to point to the location of the FreeTDS shared library. This example is for Ubuntu 14.04.) /etc/odbc.ini:: [TS] Description = "Salt Returner" Driver=TDS Server = <your server ip or fqdn> Port = 1433 Database = salt Trace = No Also you need the following values configured in the minion or master config. Configure as you see fit:: returner.odbc.dsn: 'TS' returner.odbc.user: 'salt' returner.odbc.passwd: 'salt' Alternative configuration values can be used by prefacing the configuration. Any values not found in the alternative configuration will be pulled from the default location:: alternative.returner.odbc.dsn: 'TS' alternative.returner.odbc.user: 'salt' alternative.returner.odbc.passwd: 'salt' Running the following commands against Microsoft SQL Server in the desired database as the appropriate user should create the database tables correctly. Replace with equivalent SQL for other ODBC-compliant servers .. code-block:: sql -- -- Table structure for table 'jids' -- if OBJECT_ID('dbo.jids', 'U') is not null DROP TABLE dbo.jids CREATE TABLE dbo.jids ( jid varchar(255) PRIMARY KEY, load varchar(MAX) NOT NULL ); -- -- Table structure for table 'salt_returns' -- IF OBJECT_ID('dbo.salt_returns', 'U') IS NOT NULL DROP TABLE dbo.salt_returns; CREATE TABLE dbo.salt_returns ( added datetime not null default (getdate()), fun varchar(100) NOT NULL, jid varchar(255) NOT NULL, retval varchar(MAX) NOT NULL, id varchar(255) NOT NULL, success bit default(0) NOT NULL, full_ret varchar(MAX) ); CREATE INDEX salt_returns_added on dbo.salt_returns(added); CREATE INDEX salt_returns_id on dbo.salt_returns(id); CREATE INDEX salt_returns_jid on dbo.salt_returns(jid); CREATE INDEX salt_returns_fun on dbo.salt_returns(fun); To use this returner, append '--return odbc' to the salt command. .. code-block:: bash salt '*' status.diskusage --return odbc To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return odbc --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. code-block:: bash salt '*' test.ping --return odbc --return_kwargs '{"dsn": "dsn-name"}' """ import salt.returners import salt.utils.jid import salt.utils.json # FIXME We'll need to handle this differently for Windows. try: import pyodbc # import psycopg2.extras HAS_ODBC = True except ImportError: HAS_ODBC = False # Define the module's virtual name __virtualname__ = "odbc" def __virtual__(): if not HAS_ODBC: return False, "Could not import odbc returner; pyodbc is not installed." 
return True def _get_options(ret=None): """ Get the odbc options from salt. """ attrs = {"dsn": "dsn", "user": "user", "passwd": "passwd"} _options = salt.returners.get_returner_options( "returner.{}".format(__virtualname__), ret, attrs, __salt__=__salt__, __opts__=__opts__, ) return _options def _get_conn(ret=None): """ Return a MSSQL connection. """ _options = _get_options(ret) dsn = _options.get("dsn") user = _options.get("user") passwd = _options.get("passwd") return pyodbc.connect("DSN={};UID={};PWD={}".format(dsn, user, passwd)) def _close_conn(conn): """ Close the MySQL connection """ conn.commit() conn.close() def returner(ret): """ Return data to an odbc server """ conn = _get_conn(ret) cur = conn.cursor() sql = """INSERT INTO salt_returns (fun, jid, retval, id, success, full_ret) VALUES (?, ?, ?, ?, ?, ?)""" cur.execute( sql, ( ret["fun"], ret["jid"], salt.utils.json.dumps(ret["return"]), ret["id"], ret["success"], salt.utils.json.dumps(ret), ), ) _close_conn(conn) def save_load(jid, load, minions=None): """ Save the load to the specified jid id """ conn = _get_conn(ret=None) cur = conn.cursor() sql = """INSERT INTO jids (jid, load) VALUES (?, ?)""" cur.execute(sql, (jid, salt.utils.json.dumps(load))) _close_conn(conn) def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument """ Included for API consistency """ def get_load(jid): """ Return the load data that marks a specified jid """ conn = _get_conn(ret=None) cur = conn.cursor() sql = """SELECT load FROM jids WHERE jid = ?;""" cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data) _close_conn(conn) return {} def get_jid(jid): """ Return the information returned when the specified job id was executed """ conn = _get_conn(ret=None) cur = conn.cursor() sql = """SELECT id, full_ret FROM salt_returns WHERE jid = ?""" cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) _close_conn(conn) return ret def
(fun): """ Return a dict of the last function called for all minions """ conn = _get_conn(ret=None) cur = conn.cursor() sql = """SELECT s.id,s.jid, s.full_ret FROM salt_returns s JOIN ( SELECT MAX(jid) AS jid FROM salt_returns GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = ? """ cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, retval in data: ret[minion] = salt.utils.json.loads(retval) _close_conn(conn) return ret def get_jids(): """ Return a list of all job ids """ conn = _get_conn(ret=None) cur = conn.cursor() sql = """SELECT distinct jid, load FROM jids""" cur.execute(sql) data = cur.fetchall() ret = {} for jid, load in data: ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load)) _close_conn(conn) return ret def get_minions(): """ Return a list of minions """ conn = _get_conn(ret=None) cur = conn.cursor() sql = """SELECT DISTINCT id FROM salt_returns""" cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) _close_conn(conn) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument """ Do any work necessary to prepare a JID, including sending a custom id """ return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
get_fun
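The returner's docstring walks through the DSN/user/passwd configuration and the salt_returns schema, and the insert itself happens in _get_conn plus returner above. For reference, a minimal sketch of the same pyodbc flow outside of Salt, assuming the example 'TS' DSN and the salt/salt credentials from the docstring are actually configured on the host:

import json
import pyodbc

# Hypothetical standalone insert mirroring what returner() does.
conn = pyodbc.connect("DSN=TS;UID=salt;PWD=salt")
cur = conn.cursor()
cur.execute(
    "INSERT INTO salt_returns (fun, jid, retval, id, success, full_ret) "
    "VALUES (?, ?, ?, ?, ?, ?)",
    ("test.ping", "20230101000000000000", json.dumps(True),
     "minion1", True, json.dumps({"return": True})),
)
conn.commit()
conn.close()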
gulpfile.babel.js
/** * * Quick Logcat * Copyright 2019 Google Inc. All rights reserved.
* you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License * */ import gulp from 'gulp'; import del from 'del'; import gulpLoadPlugins from 'gulp-load-plugins'; import { rollup } from 'rollup'; import { terser } from 'rollup-plugin-terser'; import babel from 'rollup-plugin-babel'; const $ = gulpLoadPlugins(); // Optimize images let images = () => gulp.src('app/images/**/*') .pipe($.cache($.imagemin({ progressive: true, interlaced: true }))) .pipe(gulp.dest('dist/images')) .pipe($.size({title: 'images'})); // Copy all files at the root level (app) let copy = () => gulp.src([ 'app/*', '!app/*.html' ], { dot: true }).pipe(gulp.dest('dist')) .pipe($.size({title: 'copy'})); // Compile and automatically prefix stylesheets let styles = () => { const AUTOPREFIXER_BROWSERS = [ 'ie >= 10', 'ie_mob >= 10', 'ff >= 30', 'chrome >= 34', 'safari >= 7', 'opera >= 23', 'ios >= 7', 'android >= 4.4', 'bb >= 10' ]; // For best performance, don't add Sass partials to `gulp.src` return gulp.src([ 'app/styles/**/*.css' ]) // .pipe($.newer('.tmp/styles')) .pipe($.autoprefixer(AUTOPREFIXER_BROWSERS)) .pipe(gulp.dest('.tmp/styles')) .pipe($.concat('all.css')) // Concatenate and minify styles .pipe($.cssnano()) .pipe($.size({title: 'styles'})) .pipe(gulp.dest('dist/styles')); }; // Scan your HTML for assets & optimize them let html = () => { return gulp.src('app/**/*.html') .pipe($.useref({ searchPath: '{.tmp,app}', noAssets: true })) // Minify any HTML .pipe($.if('*.html', $.htmlmin({ removeComments: true, collapseWhitespace: true, collapseBooleanAttributes: true, removeAttributeQuotes: true, removeRedundantAttributes: true, removeEmptyAttributes: true, removeScriptTypeAttributes: true, removeStyleLinkTypeAttributes: true, removeOptionalTags: true }))) // Output files .pipe($.if('*.html', $.size({title: 'html', showFiles: true}))) .pipe(gulp.dest('dist')); }; let clean = () => { return del(['.tmp', 'dist/*', '!dist/.git'], {dot: true}); }; let sw = () => { return rollup({ input: './app/sw.js', plugins: [ terser() ] }).then(bundle => { return bundle.write({ file: './dist/sw.js', format: 'iife' }) }) } let client_modules = () => { return rollup({ input: './app/scripts/main.mjs', plugins: [ terser() ] }).then(bundle => { return bundle.write({ file: './dist/scripts/main.mjs', format: 'es' }) }) }; let client = () => { return rollup({ input: './app/scripts/main.mjs', plugins: [ babel({ babelrc: false, presets: [['@babel/env',{"targets": { "chrome": "41" }}]], exclude: 'node_modules/**', }), terser() ] }).then(bundle => { return bundle.write({ file: './dist/scripts/main.js', name: 'main.js', format: 'iife' }) }) }; let build = gulp.series(clean, copy, gulp.parallel(client, client_modules, sw, styles, html, images)); gulp.task('default', build); gulp.task('client_modules', client_modules); gulp.task('client', client);
* * Licensed under the Apache License, Version 2.0 (the "License");
lib.rs
pub mod client; pub mod types; pub use crate::client::Client; /// Error type #[derive(Debug, thiserror::Error)] pub enum Error { /// Reqwest HTTP Error #[error("{0}")] Reqwest(#[from] reqwest::Error), /// Invalid HTTP Status #[error("{0}")] InvalidStatus(reqwest::StatusCode), /// Invalid Json #[error("{0}")] Json(#[from] serde_json::Error), /// Invalid Api Error #[error("api error ({0})")] Api(String), } /// Result Type
mod test { use super::*; const KEY: &str = include_str!("../key.txt"); #[tokio::test] async fn random() { let client = Client::new(KEY.into()); let data = client.list_random(5).await.unwrap(); println!("{:#?}", data); assert!(!data.is_empty()); } }
pub type FmlResult<T> = Result<T, Error>; #[cfg(test)]
emulator.py
from unicorn import * from unicorn.x86_const import * from capstone import * from importlib import import_module from emulation.syscall import clean_stack import argparse import emulation.syscall as winsyscall import pefile import struct import sys import ast import os #TODO: Deal with SEH structure #TODO: Randomize TEB base address #TODO: Randomize process ID #TODO: Randomize thread ID #TODO: Process management #TODO: Thread management #TODO: Fake FileSystem #TODO: Fake running process API_refs = 'winapi_9k.csv' regs = ['eax', 'ebx', 'ecx', 'edx', 'esp', 'ebp', 'edi', 'esi'] md = Cs(CS_ARCH_X86, CS_MODE_32) full_content = '' class Environment: def __init__(self, args): # Argument validation self.breakpoint = args.breakpoint self.trace = args.trace self.dump = args.dump self.silent = args.silent self.out = args.out self.stack = args.stack self.registers = args.registers self.debug = args.debug self.handle_list = args.handle self.show_extract = args.extract self.imports = args.imports self.dynamics = [] if self.trace: self.calltrace = [] if self.stack and self.registers: self.dump = True if self.dump: self.registers = True self.stack = True path = args.path self.shortname = path.split('/')[-1].split('.')[0].lower() self.drivename = 'C:\\Users\\EllenRipley\\Desktop\\' + self.shortname self.username = 'EllenRipley' self.computername = 'Nostromo' self.computer_mac = '0F-0C-95-86-20-29' self.computer_ip = '192.168.0.12' self.path = path self.chunks = [] self.virtual_memory = [] self.resources = {} self.extracts = {} self.threads = [] self.thread_ret = None self.thread_trace = [] self.thread_max_replay = 5 self.max_loop = 10 self.current_loop_counter = 0 self.previous_loop = [] self.current_loop = [] self.execution_mode = 'default' self.uc = Uc(UC_ARCH_X86, UC_MODE_32) self.handle = {'0xaa': ['placeholder_dynamic_handle', 'dummy']} try: self.pe = pefile.PE(path) except OSError as e: print(e) exit -1 except pefile.PEFormatError as e: print(f'Malformated or invalid PE file: {e.value}') exit -1 # Log every instruction emulated def hook_code(self, a, address, size, user_data): instruction = self.uc.mem_read(address, size) # Manual Breakpoint if self.breakpoint: if hex(address) == self.breakpoint: final_esp = self.uc.reg_read(UC_X86_REG_ESP) final_ebp = self.uc.reg_read(UC_X86_REG_EBP) self.uc.emu_stop() self.calltrace.append('breakpoint') print('[+] Breakpoint hits at 0x%08x' % int(self.breakpoint, 16)) return # Out of function range for i in md.disasm(instruction, address): #if 'int' in i.mnemonic: #original_eip = self.uc.reg_read(UC_X86_REG_EIP) #self.uc.reg_write(UC_X86_REG_EIP, original_eip + len(i.bytes)) #return if i.mnemonic == 'add' and i.op_str == 'byte ptr [eax], al': print('[!] 
End of the main emulation thread') self.uc.emu_stop() return # Bypass traps to debuger #if str(i.mnemonic) == 'int3': # if not self.silent: # print('> Tracing intruction ' + hex(i.address), ':', i.mnemonic, i.op_str) # original_eip = self.uc.reg_read(UC_X86_REG_EIP) # self.uc.reg_write(UC_X86_REG_EIP, original_eip + len(i.bytes)) if str(i.mnemonic) == 'call' and 'dword ptr [' in i.op_str: target = i.op_str.split('[')[1].split(']')[0] if target not in self.raw_IAT and self.silent: # print('[CHECKME]> Tracing intruction ' + hex(i.address), ':', i.mnemonic, i.op_str) self.hook_syscall(i.op_str, 'call', i.address, i.bytes) else: self.hook_syscall(i.op_str, 'call', i.address, i.bytes) elif str(i.mnemonic) == 'call': #print('[Debug]', i.mnemonic, i.op_str) self.hook_syscall(i.op_str, 'call', i.address, i.bytes) elif str(i.mnemonic) == 'jmp' and 'dword ptr [' in i.op_str: target = i.op_str.split('[')[1].split(']')[0] if i.op_str in regs: dest_addr = '0x%08x' % eval('self.uc.reg_read(UC_X86_REG_' + i.op_str.replace(' ','').upper() + ')') elif ('+' in i.op_str or '-' in i.op_str or '*' in i.op_str): left_elem = i.op_str.split('[')[1].split(']')[0].split(' ')[0].replace(' ', '') operator = i.op_str.split('[')[1].split(']')[0].split(' ')[1].replace(' ', '') right_elem = i.op_str.split('[')[1].split(']')[0].split(' ')[2].replace(' ', '') # call/jmp [eax+4] if left_elem in regs: left_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + left_elem.upper()))) dest_addr_ptr = '0x%08x' % eval(left_value + operator + right_elem) content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4) target = '0x%08x' % struct.unpack('I', content)[0] # call/jmp [eax*4 + 10] elif '+' in left_elem or '-' in left_elem or '*' in left_elem: lleft_elem = left_elem.split('*')[0].split('-')[0].split('+')[0] lleft_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + lleft_elem.upper()))) lleft_op = left_elem.replace(lleft_elem, lleft_value) dest_addr_ptr = '0x%08x' % eval(lleft_op + operator + right_elem) content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4) target = '0x%06x' % struct.unpack('I', content)[0] else: print('[-] Something went terribly wrong') exit(1) else: target = i.op_str.split('[')[1].split(']')[0] if target not in self.raw_IAT: #self.hook_syscall(i.op_str, 'jmp', i.address, i.bytes) if not self.silent: print('> Tracing intruction ' + hex(i.address), ':', i.mnemonic, i.op_str) #return self.hook_syscall(i.op_str, 'jmp', i.address, i.bytes) else: self.hook_syscall(i.op_str, 'jmp', i.address, i.bytes) else: if not self.silent: print('> Tracing intruction ' + hex(i.address), ':', i.mnemonic, i.op_str) # Hook and trace syscalls def hook_syscall(self, instruction, mnemonic, addr, byte): if self.execution_mode == 'thread': self.thread_trace.append(addr) dup_api = {i:self.thread_trace.count(i) for i in self.thread_trace} for elem in dup_api: rep = dup_api[elem] if rep >= self.thread_max_replay: self.uc.emu_stop() if self.debug: print('[!] 
Thread stoped due to it\'s repetition (infinite loop)') return is_ptr = False if '[' in instruction: is_ptr = True try: if instruction in regs: dest_addr = '0x%08x' % eval('self.uc.reg_read(UC_X86_REG_' + instruction.replace(' ','').upper() + ')') elif ('+' in instruction or '-' in instruction) and is_ptr: left_elem = instruction.split('[')[1].split(']')[0].split(' ')[0].replace(' ', '') operator = instruction.split('[')[1].split(']')[0].split(' ')[1].replace(' ', '') right_elem = instruction.split('[')[1].split(']')[0].split(' ')[2].replace(' ', '') # call/jmp [eax+4] if left_elem in regs: left_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + left_elem.upper()))) dest_addr_ptr = '0x%08x' % eval(left_value + operator + right_elem) content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4) dest_addr = '0x%08x' % struct.unpack('I', content)[0] # call/jmp [eax*4 + 10] elif '+' in left_elem or '-' in left_elem or '*' in left_elem: lleft_elem = left_elem.split('*')[0].split('-')[0].split('+')[0] lleft_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + lleft_elem.upper()))) lleft_op = left_elem.replace(lleft_elem, lleft_value) dest_addr_ptr = '0x%08x' % eval(lleft_op + operator + right_elem) content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4) dest_addr = '0x%08x' % struct.unpack('I', content)[0] else: print('[-] Something went terribly wrong') exit(1) else: dest_addr = '0x' + instruction.split('0x')[1].replace(']','') except: print('[-] Weird call at 0x%08X, investigate me ! "%s %s"' % (addr, mnemonic, instruction)) return # Are we calling a function from the IAT in a weird way ? #print(self.IAT) if str(dest_addr) in self.IAT_hook.values(): target_iat_call = list(self.IAT_hook.keys())[list(self.IAT_hook.values()).index(dest_addr)] for dll in self.IAT: for func_addr in self.IAT[dll]: func_name = self.IAT[dll].get(func_addr) if func_name == target_iat_call: #print('[*] IAT call detected:', target_iat_call, func_addr) dest_addr = func_addr break #return # Is this targeting the IAT or a mapped function ? api_name_tmp = None IAT_entry = list(self.raw_IAT.keys()) if dest_addr not in IAT_entry: if is_ptr: raw_ptr = self.uc.mem_read(int(dest_addr, 16), 0x4) ptr = '0x%08x' % struct.unpack('<I', raw_ptr)[0] if ptr in self.IAT_hook.values(): try: api_name_tmp = [k for k,v in self.IAT_hook.items() if v == ptr][0] except: api_name_tmp = None else: if not self.silent: print('> Tracing intruction ' + hex(addr), ':', mnemonic, self.shortname + '.' + str(instruction) ) print('> Following function ' + self.shortname + '.' + str(instruction) + ':') if self.trace: self.calltrace.append(self.shortname + '.' + str(instruction)) return if api_name_tmp == None: try: api_name = self.raw_IAT[dest_addr] except: return else: api_name = api_name_tmp is_valid, description, args, args_count = self.extract_API_args(api_name) if not is_valid: if self.debug: print('[!] 
Unknown call destination, fix me dude') self.uc.emu_stop() if is_ptr: api_name = '&' + api_name display_line = instruction.replace(dest_addr, api_name) if not self.silent: print('> Tracing intruction ' + hex(addr), ':', mnemonic, display_line) # print('> Tracing intruction ' + hex(addr), ': call', display_line + ' #' + description) if mnemonic == 'call': self.fake_syscall(addr, args_count, api_name, byte, 0x0) # Return 0 by default elif mnemonic == 'jmp': self.fake_jmpcall(addr, args_count, api_name, byte, 0x0) # Read <size> bytes from the stack address <start> def read_stack(self, start, size): print('=========== Stack Dump ==========') final_stack = self.uc.mem_read(start, size) stack_addr = start for x in range(0, size // 4): stack_addr += 4 stack_content = final_stack[0:4] final_stack = final_stack[4:] stack_value = struct.unpack('I', stack_content)[0] print('0x%08x : 0x%08x' % (stack_addr, stack_value)) # Fake syscall function def fake_syscall(self, addr, args_count, api, opcode, ret_value): api_name = api.replace('&', '') display = '> ' + hex(addr) + ': ' + api_name + '(' current_esp = self.uc.reg_read(UC_X86_REG_ESP) val = self.uc.mem_read(current_esp, 4*args_count) loc_esp = self.uc.reg_read(UC_X86_REG_ESP) args = [] for x in range(0, args_count): value = self.read_byte(loc_esp + (x*4)) args.append(hex(value)) # Test weather or not a special hook exist if api_name in dir(winsyscall): # This API need to be intercept with a special hardcoded hook function = getattr(winsyscall, api_name) ret_code, ret_args = function(self, args) if ret_code == 'THREAD': taddr = int(self.threads[-1]) ret_code = 0x1 for elem in self.handle: hval = self.handle[elem][0] if hval == taddr: ret_code = int(elem, 16) break if self.debug: print('[!] Spawning a new thread at ' + hex(self.threads[-1])) if ret_args == 'EXIT': print(display + '0x0)') self.uc.emu_stop() return display += str(ret_args).replace('[', '').replace(']','').replace("'", '') + ') = ' if ret_code != None: display += hex(ret_code) else: display += str(ret_code) else: clean_stack(self, args_count) ret_code = 0x0 display += str(args).replace('[', '').replace(']', '').replace("'", '') + ') = ' display += hex(ret_code) # Avoid dead end / infinite loop if len(self.current_loop) < self.max_loop: self.current_loop.append(addr) elif len(self.current_loop) == self.max_loop: if self.previous_loop.sort() == self.current_loop.sort(): if self.current_loop_counter == self.max_loop: print('[!] Inifinite loop detected, stoping the emulation') self.uc.emu_stop() return self.current_loop = [] self.current_loop_counter += 1 else: self.previous_loop = self.current_loop print(display) # Does the function return something ? if ret_code != None: # Fake return code to 0 self.uc.reg_write(UC_X86_REG_EAX, ret_code) # Redirect EIP original_eip = self.uc.reg_read(UC_X86_REG_EIP) self.uc.reg_write(UC_X86_REG_EIP, original_eip + len(opcode)) # Pop a value from the stack def popstack(self): current_esp = self.uc.reg_read(UC_X86_REG_ESP) val = self.uc.mem_read(current_esp, 4) stack_value = struct.unpack('I', val)[0] return stack_value # Decrement the stack value def decstack(self):
# Read a 4 byte value at a given address def read_byte(self, addr): val = self.uc.mem_read(addr, 4) formated_value = struct.unpack('I', val)[0] return formated_value # Fake jmp to syscall ptr def fake_jmpcall(self, addr, args_count, api, opcode, ret_value): display = '> ' + hex(addr) + ': ' + api.replace('&', '') + '(' ret = self.popstack() self.decstack() loc_esp = self.uc.reg_read(UC_X86_REG_ESP) loc_args = [] for x in range(0, args_count): value = self.read_byte(loc_esp + (x*4)) loc_args.append(hex(value)) # display += str(loc_args).replace('[', '').replace(']', '').replace("'", '') + '' args = loc_args api_name = api.replace('&', '') if api_name in dir(winsyscall): # This API need to be intercept with a special hardcoded hook function = getattr(winsyscall, api_name) ret_code, ret_args = function(self, args) if ret_code == 'THREAD': taddr = int(self.threads[-1]) ret_code = 0x1 for elem in self.handle: hval = self.handle[elem][0] if hval == taddr: ret_code = int(elem, 16) break if self.debug: print('[!] Spawning a new thread at ' + hex(self.threads[-1])) if ret_args == 'EXIT': print(display + '0x0)') self.uc.emu_stop() return display += str(ret_args).replace('[', '').replace(']','').replace("'", '') + ') = ' if ret_code != None: display += hex(ret_code) else: display += str(ret_code) else: # clean_stack(self, args_count) ret_code = 0x0 display += str(args).replace('[', '').replace(']', '').replace("'", '') + ') = ' display += hex(ret_code) # Avoid dead end / infinite loop if len(self.current_loop) < self.max_loop: self.current_loop.append(addr) elif len(self.current_loop) == self.max_loop: if self.previous_loop.sort() == self.current_loop.sort(): if self.current_loop_counter == self.max_loop: print('[!] Inifinite loop detected, stoping the emulation') self.uc.emu_stop() return self.current_loop = [] self.current_loop_counter += 1 else: self.previous_loop = self.current_loop print(display) # Does the function return something ? 
if ret_code != None: # Fake return code to 0 self.uc.reg_write(UC_X86_REG_EAX, ret_code) else: # Fake return code to 0 self.uc.reg_write(UC_X86_REG_EAX, 0x0) # Redirect EIP self.uc.reg_write(UC_X86_REG_EIP, ret) # Print a list of used handles def read_handle(self): print('========= Opened Handles ========') for h in self.handle: handle_addr = h handle_value = self.handle[h][0] handle_type = self.handle[h][1] if handle_type == 'dummy': continue if len(str(handle_value)) > 50: handle_value = str(handle_value)[:25] + '[...]' + str(handle_value)[-9:] print('Address=' + str(handle_addr) + ' Type=' + str(handle_type) + ' Value=' + str(handle_value) ) # Show and extract potentials payloads def display_extracts(self): # Search Binary in allocated memory regions for vmem in self.virtual_memory: content = self.uc.mem_read(vmem.data_address, vmem.data_size) if content[:2] == b'MZ': self.extracts['hmemory_' + hex(vmem.data_address)] = content print('======= Extracted Payloads =======') if len(self.extracts) == 0: print('Nothing found') return dirname = './' + self.shortname + '_emu' if not os.path.exists(dirname): os.makedirs(dirname) counter = 0 for entry in self.extracts: name = entry[1:] options = '' data = self.extracts[entry] if len(str(data)) > 50: sdata = str(data)[:25] + '[...]' + str(data)[-9:] else: sdata = data if data[:2] == b'MZ' or data[:2] == 'MZ': options = ' (PE payload detected)' print('Name="' + name + '" Content="' + sdata + '"' + options) fname = name.split('\\')[-1] if fname == '': fname = 'generic_extract_' + str(counter) + '.bin' f = open(dirname + '/' + fname, 'wb') f.write(data) f.close() # Print a list of dynamically resolved functions def read_dynamic_imports(self): print('========= Dynamic Imports =======') if len(self.dynamics) == 0x0: print('No dynamic imports where detected during the emulation') for i in self.dynamics: print('Address=', i[0], ' Name=', i[1]) # Print a dump of the current registers def read_full_regs(self): print('=== Registers Dump ===') print('EAX: 0x%08x | EBP: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EAX), self.uc.reg_read(UC_X86_REG_EBP))) print('EBX: 0x%08x | ESP: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EBX), self.uc.reg_read(UC_X86_REG_ESP))) print('ECX: 0x%08x | ESI: 0x%08x' % (self.uc.reg_read(UC_X86_REG_ECX), self.uc.reg_read(UC_X86_REG_ESI))) print('EDX: 0x%08x | EDI: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EDX), self.uc.reg_read(UC_X86_REG_EDI))) print('EIP: 0x%08x ' % self.uc.reg_read(UC_X86_REG_EIP)) # Retreive the corresponding Windows API in our list def extract_API_args(self, api_name): with open(API_refs) as f: line = next((l for l in f if api_name == l.split(';')[0]), None) if line == None or line == '': # We're fucked mate return False, '', '', 0 name = line.split(';')[0] description = line.split(';')[1].split(';')[0] args = line.split(';')[2] args_count = args.count(',') + 1 if args_count == 1 and args.replace('\n', '').replace(' ','') == '': args_count = 0 if args == '' or args == None: # We're double fucked maaaatee # print('[!] 
Cannot gather arguments count and type, fix me') return True, description, '', 0 return True, description, args, args_count # Setup a fake IAT def generate_Import_Address_Table(self): self.IAT = {} self.raw_IAT = {} dll_count = 0 functions_count = 0 for entry in self.pe.DIRECTORY_ENTRY_IMPORT: functions = {} dll_count += 1 for imp in entry.imports: functions_count += 1 #print(imp.name.decode()) functions[hex(imp.address)] = imp.name.decode() self.raw_IAT[hex(imp.address)] = imp.name.decode() self.IAT[entry.dll.lower().decode()] = functions self.IAT['dynamic_import'] = {'0x00ff0000': 'placeholder_dynamic_import'} if self.debug: print('[DEBUG] ' + str(functions_count) + ' functions imported in the IAT from ' + str(dll_count) + ' DLL') # Setup a hook structure for the IAT def hook_Import_Address_Table(self): self.IAT_hook = {} cnt = 0 for dll in self.IAT: if dll == 'dynamic_import': continue for entry_addr in self.IAT[dll]: entry = self.IAT[dll][entry_addr] #self.uc.mem_write(int(entry_addr, 16), bytes([cnt])) content = self.uc.mem_read(int(entry_addr, 16), 0x4) value = '0x' + struct.pack("<I", int(bytes(content).hex(), 16)).hex() self.IAT_hook[entry] = value cnt += 1 #print(self.IAT_hook) if self.debug: print('[DEBUG] ' + str(cnt) + ' IAT entry where hooked') # Setup the process TIB structure def generate_Thread_Information_Block(self): self.TEB_base_addr = 0x200000 self.process_ID = 0x1908 self.thread_ID = 0x10C self.PEB_base_addr = self.TEB_base_addr + 0x1000 TEB = b'' TEB += struct.pack("<I", 0xffffffff) # FS:[0x00] Structure Exception Handler (SEH) TEB += struct.pack("<I", (self.stack_addr + self.stack_size)) # FS:[0x04] Stack Base TEB += struct.pack("<I", self.stack_addr) # FS:[0x08] Stack Limit TEB += struct.pack("<I", 0x0) # FS:[0x0C] Subsystem TIB TEB += struct.pack("<I", 0x0) # FS:[0x10] Fiber Data TEB += struct.pack("<I", 0x0) # FS:[0x14] Arbitrary Data Slot TEB += struct.pack("<I", self.TEB_base_addr) # FS:[0x18] Linear Address of TEB TEB += struct.pack("<I", 0x0) # FS:[0x1C] Environment Pointer TEB += struct.pack("<I", self.process_ID) # FS:[0x20] Process ID TEB += struct.pack("<I", self.thread_ID) # FS:[0x24] Current Thread ID TEB += struct.pack("<I", 0x0) # FS:[0x28] Active RPC Handle TEB += struct.pack("<I", 0x0) # FS:[0x2C] Linear Address of the thread-local storage array TEB += struct.pack("<I", self.PEB_base_addr) # FS:[0x30] Linear Address of the Process Environment Block (PEB) page_size=4096 m = 0x5000 % page_size f = page_size - m aligned_size = 0x5000 + f # Map and write the TEB in memory self.uc.mem_map(self.TEB_base_addr, aligned_size) self.uc.mem_write(self.TEB_base_addr, TEB) def launch(self): # Get header most importants fields self.header_image_base = self.pe.OPTIONAL_HEADER.ImageBase self.header_size_of_image = self.pe.OPTIONAL_HEADER.SizeOfImage self.header_entrypoint = self.pe.OPTIONAL_HEADER.AddressOfEntryPoint self.mapped_image = self.pe.get_memory_mapped_image(ImageBase=self.header_image_base) self.mapped_size = (len(self.mapped_image) + 0x1000) & ~0xFFF self.exit_addr = 0xfffff000 # Redirect to file if self.out != None: sys.stdout = open(self.out, "w") # Get virtual size needed for PE mapping min_offset = sys.maxsize virtual_size = 0 for section in self.pe.sections: min_offset = section.VirtualAddress virtual_size += min_offset virtual_size += min_offset m = virtual_size % 4096 f = 4096 - m aligned_virtual_size = virtual_size + f # Map the binary in memory self.uc.mem_map(self.header_image_base, self.mapped_size) self.uc.mem_write(self.header_image_base, 
self.mapped_image) self.start_addr = self.header_entrypoint + self.header_image_base if self.debug: print('[DEBUG] Binary mapped in memory at 0x%08x' % self.header_image_base) # Initialize the stack self.stack_addr = 0x0 self.stack_size = 0x200000 self.uc.mem_map(self.stack_addr, self.stack_size) if self.debug: print('[DEBUG] Stack of 0x%x bytes starting at 0x%08x' % (self.stack_size, self.stack_addr)) self.uc.reg_write(UC_X86_REG_ESP, self.stack_addr + self.stack_size - 0x500) self.uc.reg_write(UC_X86_REG_EBP, self.stack_addr + self.stack_size - 0x100) if self.debug: print('[DEBUG] Initial stack frame created between 0x%08x and 0x%08x' % (self.stack_size - 0x500, self.stack_size - 0x100)) # Create a the TEB structure self.generate_Thread_Information_Block() if self.debug: print('[DEBUG] Thread Information Block initiated at 0x%08x' % self.TEB_base_addr) # Create a the PEB structure # TODO # Create a fake IAT self.generate_Import_Address_Table() # Place hooks on the IAT self.hook_Import_Address_Table() # Initiate the registers self.uc.reg_write(UC_X86_REG_EDI, self.start_addr) self.uc.reg_write(UC_X86_REG_ESI, self.start_addr) self.uc.reg_write(UC_X86_REG_EDX, self.start_addr) self.uc.reg_write(UC_X86_REG_ECX, self.start_addr) self.uc.reg_write(UC_X86_REG_EBX, self.PEB_base_addr) # EBP point to the PEB address self.uc.reg_write(UC_X86_REG_EAX, self.TEB_base_addr) # EAX point to the TIB address # Place a debug hook self.uc.hook_add(UC_HOOK_CODE, self.hook_code) # Place a memory debug hook #self.uc.hook_add(UC_ERR_FETCH_UNMAPPED, self.hook_mem_invalid) # Start emulation print('[DEBUG] Starting the emulation of "%s.exe" from 0x%08x' % (self.drivename, self.start_addr)) print() self.uc.emu_start(self.start_addr, self.start_addr + 500000, timeout=20 * UC_SECOND_SCALE) print() if len(self.threads) != 0: uniq_threads = list(dict.fromkeys(self.threads)) else: uniq_threads = False if self.debug: print('[!] Looking for entrypoints in the threads queue') if uniq_threads: for thread_addr in uniq_threads: print('[!] Starting the thread ' + hex(thread_addr)) self.execution_mode = 'thread' self.uc.hook_add(UC_HOOK_CODE, self.hook_code) self.uc.emu_start(thread_addr, self.start_addr + 100, timeout=20 * UC_SECOND_SCALE) #self.uc.reg_write(UC_X86_REG_EIP, add) print('[!] 
End of the thread ' + hex(thread_addr)) self.thread_trace = [] print() # Display final program's state final_esp = self.uc.reg_read(UC_X86_REG_ESP) final_ebp = self.uc.reg_read(UC_X86_REG_EBP) if args.dynamics: self.read_dynamic_imports() print() if self.stack: self.read_stack(final_esp, final_ebp - final_esp) print() if self.registers: self.read_full_regs() print() if self.handle_list: self.read_handle() print() if self.show_extract: self.display_extracts() print() if self.trace: print('==== Call trace ====') print(' → Entrypoint') for elem in self.calltrace: print(' → ' + elem) if self.out != None: sys.stdout.close() def main(args): emul = Environment(args) emul.launch() parser = argparse.ArgumentParser(description='Windows Binary Emulator') parser.add_argument('-p', '--path', required=True, help='path to the binary file to emulate') parser.add_argument('-b', '--breakpoint', required=False, help='pause the execution at the given address') parser.add_argument('--trace', required=False, action="store_true", help='display the call trace of the binary') parser.add_argument('--dump', required=False, action="store_true", help='display a full dump of the program\'s state after the execution') parser.add_argument('--stack', required=False, action="store_true", help='display a dump of the stack after the execution') parser.add_argument('--registers', required=False, action="store_true", help='display a dump of the regsiters after the execution') parser.add_argument('--debug', required=False, action="store_true", help='display debug messages') parser.add_argument('--silent', required=False, action="store_true", help='only print out the system calls') parser.add_argument('--handle', required=False, action="store_true", help='display the list of used handles') parser.add_argument('--extract', required=False, action="store_true", help='extract potentials payloads found in memory. Files are saved to <bin_name>_emu.out/') parser.add_argument('--imports', required=False, action="store_true", help='UNIMPLEMENTED - display the static content of the import address table (IAT)') parser.add_argument('--dynamics', required=False, action="store_true", help='display the list of dynamically resolved syscall') parser.add_argument('--out', required=False, help='save the emulation output to a file') args = parser.parse_args() main(args)
current_esp = self.uc.reg_read(UC_X86_REG_ESP) self.uc.reg_write(UC_X86_REG_ESP, int(current_esp + 4))
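emulator.py is built on the Unicorn engine: it maps the PE image and a stack with mem_map/mem_write, seeds registers with reg_write, registers a UC_HOOK_CODE callback, and drives execution with emu_start. A minimal self-contained sketch of that same pattern, using two hand-assembled x86 instructions instead of a real PE, to show the lifecycle the Environment class wraps:

from unicorn import Uc, UC_ARCH_X86, UC_MODE_32, UC_HOOK_CODE
from unicorn.x86_const import UC_X86_REG_ECX, UC_X86_REG_EDX

CODE = b"\x41\x4a"   # inc ecx; dec edx
BASE = 0x1000000

mu = Uc(UC_ARCH_X86, UC_MODE_32)
mu.mem_map(BASE, 0x1000)        # map one 4 KiB page for the code
mu.mem_write(BASE, CODE)
mu.reg_write(UC_X86_REG_ECX, 0x10)
mu.reg_write(UC_X86_REG_EDX, 0x20)

# Per-instruction hook, analogous to Environment.hook_code above.
mu.hook_add(UC_HOOK_CODE, lambda uc, addr, size, user: print(hex(addr), size))

mu.emu_start(BASE, BASE + len(CODE))
print(hex(mu.reg_read(UC_X86_REG_ECX)), hex(mu.reg_read(UC_X86_REG_EDX)))  # 0x11 0x1f

The full emulator layers PE parsing, a fake IAT, TEB setup, and syscall interception on top of exactly this map/write/hook/start loop.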
moc_cloudagent_container.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: moc_cloudagent_container.proto package storage import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" wrappers "github.com/golang/protobuf/ptypes/wrappers" common "github.com/microsoft/moc/rpc/common" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type ContainerType int32 const ( ContainerType_UNKNOWN ContainerType = 0 ContainerType_SAN ContainerType = 1 ContainerType_CSV ContainerType = 2 ContainerType_SMB ContainerType = 3 ContainerType_DAS ContainerType = 4 ) var ContainerType_name = map[int32]string{ 0: "UNKNOWN", 1: "SAN", 2: "CSV", 3: "SMB", 4: "DAS", } var ContainerType_value = map[string]int32{ "UNKNOWN": 0, "SAN": 1, "CSV": 2, "SMB": 3, "DAS": 4, } func (x ContainerType) String() string { return proto.EnumName(ContainerType_name, int32(x)) } func (ContainerType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_736e2a9bece4cac4, []int{0} } type ContainerRequest struct { Containers []*Container `protobuf:"bytes,1,rep,name=Containers,proto3" json:"Containers,omitempty"` OperationType common.Operation `protobuf:"varint,2,opt,name=OperationType,proto3,enum=moc.Operation" json:"OperationType,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ContainerRequest) Reset() { *m = ContainerRequest{} } func (m *ContainerRequest) String() string { return proto.CompactTextString(m) } func (*ContainerRequest) ProtoMessage() {} func (*ContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor_736e2a9bece4cac4, []int{0} } func (m *ContainerRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ContainerRequest.Unmarshal(m, b) } func (m *ContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ContainerRequest.Marshal(b, m, deterministic) } func (m *ContainerRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ContainerRequest.Merge(m, src) } func (m *ContainerRequest) XXX_Size() int { return xxx_messageInfo_ContainerRequest.Size(m) } func (m *ContainerRequest) XXX_DiscardUnknown() { xxx_messageInfo_ContainerRequest.DiscardUnknown(m) } var xxx_messageInfo_ContainerRequest proto.InternalMessageInfo func (m *ContainerRequest) GetContainers() []*Container { if m != nil { return m.Containers } return nil } func (m *ContainerRequest) GetOperationType() common.Operation { if m != nil { return m.OperationType } return common.Operation_GET } type ContainerResponse struct { Containers []*Container `protobuf:"bytes,1,rep,name=Containers,proto3" json:"Containers,omitempty"` Result *wrappers.BoolValue `protobuf:"bytes,2,opt,name=Result,proto3" json:"Result,omitempty"` Error string `protobuf:"bytes,3,opt,name=Error,proto3" json:"Error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ContainerResponse) Reset() { *m = ContainerResponse{} } func (m *ContainerResponse) String() string { 
return proto.CompactTextString(m) } func (*ContainerResponse) ProtoMessage() {} func (*ContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor_736e2a9bece4cac4, []int{1} } func (m *ContainerResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ContainerResponse.Unmarshal(m, b) } func (m *ContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ContainerResponse.Marshal(b, m, deterministic) } func (m *ContainerResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ContainerResponse.Merge(m, src) } func (m *ContainerResponse) XXX_Size() int { return xxx_messageInfo_ContainerResponse.Size(m) } func (m *ContainerResponse) XXX_DiscardUnknown() { xxx_messageInfo_ContainerResponse.DiscardUnknown(m) } var xxx_messageInfo_ContainerResponse proto.InternalMessageInfo func (m *ContainerResponse) GetContainers() []*Container { if m != nil { return m.Containers } return nil } func (m *ContainerResponse) GetResult() *wrappers.BoolValue { if m != nil { return m.Result } return nil } func (m *ContainerResponse) GetError() string { if m != nil { return m.Error } return "" } type Container struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` Status *common.Status `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` LocationName string `protobuf:"bytes,6,opt,name=locationName,proto3" json:"locationName,omitempty"` Info *common.StorageContainerInfo `protobuf:"bytes,7,opt,name=info,proto3" json:"info,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Container) Reset() { *m = Container{} } func (m *Container) String() string { return proto.CompactTextString(m) } func (*Container) ProtoMessage() {} func (*Container) Descriptor() ([]byte, []int) { return fileDescriptor_736e2a9bece4cac4, []int{2} } func (m *Container) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Container.Unmarshal(m, b) } func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Container.Marshal(b, m, deterministic) } func (m *Container) XXX_Merge(src proto.Message) { xxx_messageInfo_Container.Merge(m, src) } func (m *Container) XXX_Size() int { return xxx_messageInfo_Container.Size(m) } func (m *Container) XXX_DiscardUnknown() { xxx_messageInfo_Container.DiscardUnknown(m) } var xxx_messageInfo_Container proto.InternalMessageInfo func (m *Container) GetName() string { if m != nil { return m.Name } return "" } func (m *Container) GetId() string { if m != nil { return m.Id } return "" } func (m *Container) GetPath() string { if m != nil { return m.Path } return "" } func (m *Container) GetStatus() *common.Status { if m != nil { return m.Status } return nil } func (m *Container) GetLocationName() string { if m != nil { return m.LocationName } return "" } func (m *Container) GetInfo() *common.StorageContainerInfo { if m != nil { return m.Info } return nil } func init() { proto.RegisterEnum("moc.cloudagent.storage.ContainerType", ContainerType_name, ContainerType_value) proto.RegisterType((*ContainerRequest)(nil), "moc.cloudagent.storage.ContainerRequest") proto.RegisterType((*ContainerResponse)(nil), "moc.cloudagent.storage.ContainerResponse") proto.RegisterType((*Container)(nil), "moc.cloudagent.storage.Container") } func init() { 
proto.RegisterFile("moc_cloudagent_container.proto", fileDescriptor_736e2a9bece4cac4) }
// 465 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x52, 0xd1, 0x8a, 0xd3, 0x40, 0x14, 0xdd, 0xb4, 0xdd, 0x94, 0xde, 0xba, 0x25, 0x0e, 0xa2, 0xb1, 0xc8, 0x52, 0xe3, 0x4b, 0x15, 0x9c, 0x60, 0xf4, 0x03, 0x6c, 0x57, 0x1f, 0x16, 0x31, 0x0b, 0xa9, 0xae, 0xe0, 0x4b, 0x99, 0x4e, 0xa7, 0xd9, 0x60, 0x32, 0x37, 0x3b, 0x33, 0x51, 0xfc, 0x06, 0x7f, 0xc2, 0xff, 0xf0, 0xe7, 0x24, 0x93, 0x6c, 0xda, 0x05, 0x41, 0x1f, 0x7c, 0xca, 0xcd, 0x39, 0x67, 0xce, 0x9c, 0x9c, 0x1b, 0x38, 0x2d, 0x90, 0xaf, 0x79, 0x8e, 0xd5, 0x96, 0xa5, 0x42, 0x9a, 0x35, 0x47, 0x69, 0x58, 0x26, 0x85, 0xa2, 0xa5, 0x42, 0x83, 0xe4, 0x7e, 0x81, 0x9c, 0xee, 0x79, 0xaa, 0x0d, 0x2a, 0x96, 0x8a, 0xe9, 0x69, 0x8a, 0x98, 0xe6, 0x22, 0xb4, 0xaa, 0x4d, 0xb5, 0x0b, 0xbf, 0x29, 0x56, 0x96, 0x42, 0xe9, 0xe6, 0xdc, 0xf4, 0x81, 0xf5, 0xc5, 0xa2, 0x40, 0xd9, 0x3e, 0x5a, 0xe2, 0xd1, 0x01, 0xd1, 0x9a, 0x65, 0x72, 0x87, 0x0d, 0x1b, 0xfc, 0x70, 0xc0, 0x3b, 0xbb, 0x89, 0x90, 0x88, 0xeb, 0x4a, 0x68, 0x43, 0x16, 0x00, 0x1d, 0xa6, 0x7d, 0x67, 0xd6, 0x9f, 0x8f, 0xa3, 0xc7, 0xf4, 0xcf, 0xc1, 0xe8, 0xfe, 0xf4, 0xc1, 0x21, 0xf2, 0x0a, 0x4e, 0x2e, 0x4a, 0xa1, 0x98, 0xc9, 0x50, 0x7e, 0xf8, 0x5e, 0x0a, 0xbf, 0x37, 0x73, 0xe6, 0x93, 0x68, 0x62, 0x5d, 0x3a, 0x26, 0xb9, 0x2d, 0x0a, 0x7e, 0x3a, 0x70, 0xf7, 0x20, 0x8d, 0x2e, 0x51, 0x6a, 0xf1, 0x3f, 0xe2, 0x44, 0xe0, 0x26, 0x42, 0x57, 0xb9, 0xb1, 0x39, 0xc6, 0xd1, 0x94, 0x36, 0x75, 0xd2, 0x9b, 0x3a, 0xe9, 0x12, 0x31, 0xbf, 0x64, 0x79, 0x25, 0x92, 0x56, 0x49, 0xee, 0xc1, 0xf1, 0x5b, 0xa5, 0x50, 0xf9, 0xfd, 0x99, 0x33, 0x1f, 0x25, 0xcd, 0x4b, 0xf0, 0xcb, 0x81, 0x51, 0x67, 0x4c, 0x08, 0x0c, 0x24, 0x2b, 0x84, 0xef, 0x58, 0x89, 0x9d, 0xc9, 0x04, 0x7a, 0xd9, 0xd6, 0xde, 0x33, 0x4a, 0x7a, 0xd9, 0xb6, 0xd6, 0x94, 0xcc, 0x5c, 0xf9, 0x83, 0x46, 0x53, 0xcf, 0xe4, 0x09, 0xb8, 0xda, 0x30, 0x53, 0x69, 0xff, 0xd8, 0xe6, 0x19, 0xdb, 0xcf, 0x59, 0x59, 0x28, 0x69, 0x29, 0x12, 0xc0, 0x9d, 0x1c, 0xb9, 0x6d, 0x27, 0xae, 0x2f, 0x71, 0xad, 0xc1, 0x2d, 0x8c, 0x3c, 0x87, 0x41, 0xbd, 0x4d, 0x7f, 0x68, 0x6d, 0x1e, 0xb6, 0x36, 0xb6, 0x8a, 0x2e, 0xe5, 0xb9, 0xdc, 0x61, 0x62, 0x65, 0xcf, 0x5e, 0xc3, 0x49, 0x07, 0xd7, 0x8d, 0x93, 0x31, 0x0c, 0x3f, 0xc6, 0xef, 0xe2, 0x8b, 0x4f, 0xb1, 0x77, 0x44, 0x86, 0xd0, 0x5f, 0x2d, 0x62, 0xcf, 0xa9, 0x87, 0xb3, 0xd5, 0xa5, 0xd7, 0xb3, 0xc8, 0xfb, 0xa5, 0xd7, 0xaf, 0x87, 0x37, 0x8b, 0x95, 0x37, 0x88, 0xae, 0x61, 0xd2, 0x39, 0x2c, 0xea, 0xe6, 0xc9, 0x1a, 0xdc, 0x73, 0xf9, 0x15, 0xbf, 0x08, 0x32, 0xff, 0xfb, 0x52, 0x9a, 0x3f, 0x6c, 0xfa, 0xf4, 0x1f, 0x94, 0xcd, 0xf6, 0x83, 0xa3, 0xe5, 0x8b, 0xcf, 0x61, 0x9a, 0x99, 0xab, 0x6a, 0x43, 0x39, 0x16, 0x61, 0x91, 0x71, 0x85, 0x1a, 0x77, 0x26, 0x2c, 0x90, 0x87, 0xaa, 0xe4, 0xe1, 0xde, 0x26, 0x6c, 0x6d, 0x36, 0xae, 0xdd, 0xeb, 0xcb, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5a, 0x67, 0x0b, 0xc9, 0x6e, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // ContainerAgentClient is the client API for ContainerAgent service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type ContainerAgentClient interface { Invoke(ctx context.Context, in *ContainerRequest, opts ...grpc.CallOption) (*ContainerResponse, error) } type containerAgentClient struct { cc *grpc.ClientConn } func NewContainerAgentClient(cc *grpc.ClientConn) ContainerAgentClient { return &containerAgentClient{cc} } func (c *containerAgentClient) Invoke(ctx context.Context, in *ContainerRequest, opts ...grpc.CallOption) (*ContainerResponse, error) { out := new(ContainerResponse) err := c.cc.Invoke(ctx, "/moc.cloudagent.storage.ContainerAgent/Invoke", in, out, opts...) if err != nil { return nil, err } return out, nil } // ContainerAgentServer is the server API for ContainerAgent service. type ContainerAgentServer interface { Invoke(context.Context, *ContainerRequest) (*ContainerResponse, error) } // UnimplementedContainerAgentServer can be embedded to have forward compatible implementations. type UnimplementedContainerAgentServer struct { } func (*UnimplementedContainerAgentServer) Invoke(ctx context.Context, req *ContainerRequest) (*ContainerResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Invoke not implemented") } func RegisterContainerAgentServer(s *grpc.Server, srv ContainerAgentServer) { s.RegisterService(&_ContainerAgent_serviceDesc, srv) } func _ContainerAgent_Invoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ContainerRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ContainerAgentServer).Invoke(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/moc.cloudagent.storage.ContainerAgent/Invoke", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ContainerAgentServer).Invoke(ctx, req.(*ContainerRequest)) } return interceptor(ctx, in, info, handler) } var _ContainerAgent_serviceDesc = grpc.ServiceDesc{ ServiceName: "moc.cloudagent.storage.ContainerAgent", HandlerType: (*ContainerAgentServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Invoke", Handler: _ContainerAgent_Invoke_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "moc_cloudagent_container.proto", }
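// A minimal usage sketch of the generated ContainerAgent client above. This is
// illustrative only: the dial target, the insecure credentials choice, and the
// container fields are assumptions for the example, not values defined by this
// package.
//
//	conn, err := grpc.Dial("localhost:55000", grpc.WithInsecure())
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer conn.Close()
//
//	client := NewContainerAgentClient(conn)
//	resp, err := client.Invoke(context.Background(), &ContainerRequest{
//		Containers:    []*Container{{Name: "example", Path: "/var/lib/example"}},
//		OperationType: common.Operation_GET,
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("ok=%v err=%q", resp.GetResult().GetValue(), resp.GetError())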
var fileDescriptor_736e2a9bece4cac4 = []byte{
mod.rs
// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Composable external iteration. //! //! If you've found yourself with a collection of some kind, and needed to //! perform an operation on the elements of said collection, you'll quickly run //! into 'iterators'. Iterators are heavily used in idiomatic Rust code, so //! it's worth becoming familiar with them. //! //! Before explaining more, let's talk about how this module is structured: //! //! # Organization //! //! This module is largely organized by type: //! //! * [Traits] are the core portion: these traits define what kind of iterators //! exist and what you can do with them. The methods of these traits are worth //! putting some extra study time into. //! * [Functions] provide some helpful ways to create some basic iterators. //! * [Structs] are often the return types of the various methods on this //! module's traits. You'll usually want to look at the method that creates //! the `struct`, rather than the `struct` itself. For more detail about why, //! see '[Implementing Iterator](#implementing-iterator)'. //! //! [Traits]: #traits //! [Functions]: #functions //! [Structs]: #structs //! //! That's it! Let's dig into iterators. //! //! # Iterator //! //! The heart and soul of this module is the [`Iterator`] trait. The core of //! [`Iterator`] looks like this: //! //! ``` //! trait Iterator { //! type Item; //! fn next(&mut self) -> Option<Self::Item>; //! } //! ``` //! //! An iterator has a method, [`next`], which when called, returns an //! [`Option`]`<Item>`. [`next`] will return `Some(Item)` as long as there //! are elements, and once they've all been exhausted, will return `None` to //! indicate that iteration is finished. Individual iterators may choose to //! resume iteration, and so calling [`next`] again may or may not eventually //! start returning `Some(Item)` again at some point. //! //! [`Iterator`]'s full definition includes a number of other methods as well, //! but they are default methods, built on top of [`next`], and so you get //! them for free. //! //! Iterators are also composable, and it's common to chain them together to do //! more complex forms of processing. See the [Adapters](#adapters) section //! below for more details. //! //! [`Iterator`]: trait.Iterator.html //! [`next`]: trait.Iterator.html#tymethod.next //! [`Option`]: ../../std/option/enum.Option.html //! //! # The three forms of iteration //! //! There are three common methods which can create iterators from a collection: //! //! * `iter()`, which iterates over `&T`. //! * `iter_mut()`, which iterates over `&mut T`. //! * `into_iter()`, which iterates over `T`. //! //! Various things in the standard library may implement one or more of the //! three, where appropriate. //! //! # Implementing Iterator //! //! Creating an iterator of your own involves two steps: creating a `struct` to //! hold the iterator's state, and then `impl`ementing [`Iterator`] for that //! `struct`. This is why there are so many `struct`s in this module: there is //! one for each iterator and iterator adapter. //! //! 
Let's make an iterator named `Counter` which counts from `1` to `5`: //! //! ``` //! // First, the struct: //! //! /// An iterator which counts from one to five //! struct Counter { //! count: usize, //! } //! //! // we want our count to start at one, so let's add a new() method to help. //! // This isn't strictly necessary, but is convenient. Note that we start //! // `count` at zero, we'll see why in `next()`'s implementation below. //! impl Counter { //! fn new() -> Counter { //! Counter { count: 0 } //! } //! } //! //! // Then, we implement `Iterator` for our `Counter`: //! //! impl Iterator for Counter { //! // we will be counting with usize //! type Item = usize; //! //! // next() is the only required method //! fn next(&mut self) -> Option<usize> { //! // increment our count. This is why we started at zero. //! self.count += 1; //! //! // check to see if we've finished counting or not. //! if self.count < 6 { //! Some(self.count) //! } else { //! None //! } //! } //! } //! //! // And now we can use it! //! //! let mut counter = Counter::new(); //! //! let x = counter.next().unwrap(); //! println!("{}", x); //! //! let x = counter.next().unwrap(); //! println!("{}", x); //! //! let x = counter.next().unwrap(); //! println!("{}", x); //! //! let x = counter.next().unwrap(); //! println!("{}", x); //! //! let x = counter.next().unwrap(); //! println!("{}", x); //! ``` //! //! This will print `1` through `5`, each on their own line. //! //! Calling `next()` this way gets repetitive. Rust has a construct which can //! call `next()` on your iterator, until it reaches `None`. Let's go over that //! next. //! //! # for Loops and IntoIterator //! //! Rust's `for` loop syntax is actually sugar for iterators. Here's a basic //! example of `for`: //! //! ``` //! let values = vec![1, 2, 3, 4, 5]; //! //! for x in values { //! println!("{}", x); //! } //! ``` //! //! This will print the numbers one through five, each on their own line. But //! you'll notice something here: we never called anything on our vector to //! produce an iterator. What gives? //! //! There's a trait in the standard library for converting something into an //! iterator: [`IntoIterator`]. This trait has one method, [`into_iter`], //! which converts the thing implementing [`IntoIterator`] into an iterator. //! Let's take a look at that `for` loop again, and what the compiler converts //! it into: //! //! [`IntoIterator`]: trait.IntoIterator.html //! [`into_iter`]: trait.IntoIterator.html#tymethod.into_iter //! //! ``` //! let values = vec![1, 2, 3, 4, 5]; //! //! for x in values { //! println!("{}", x); //! } //! ``` //! //! Rust de-sugars this into: //! //! ``` //! let values = vec![1, 2, 3, 4, 5]; //! { //! let result = match IntoIterator::into_iter(values) { //! mut iter => loop { //! let next; //! match iter.next() { //! Some(val) => next = val, //! None => break, //! }; //! let x = next; //! let () = { println!("{}", x); }; //! }, //! }; //! result //! } //! ``` //! //! First, we call `into_iter()` on the value. Then, we match on the iterator //! that returns, calling [`next`] over and over until we see a `None`. At //! that point, we `break` out of the loop, and we're done iterating. //! //! There's one more subtle bit here: the standard library contains an //! interesting implementation of [`IntoIterator`]: //! //! ```ignore (only-for-syntax-highlight) //! impl<I: Iterator> IntoIterator for I //! ``` //! //! In other words, all [`Iterator`]s implement [`IntoIterator`], by just //! returning themselves. 
This means two things: //! //! 1. If you're writing an [`Iterator`], you can use it with a `for` loop. //! 2. If you're creating a collection, implementing [`IntoIterator`] for it //! will allow your collection to be used with the `for` loop. //! //! # Adapters //! //! Functions which take an [`Iterator`] and return another [`Iterator`] are //! often called 'iterator adapters', as they're a form of the 'adapter //! pattern'. //! //! Common iterator adapters include [`map`], [`take`], and [`filter`]. //! For more, see their documentation. //! //! [`map`]: trait.Iterator.html#method.map //! [`take`]: trait.Iterator.html#method.take //! [`filter`]: trait.Iterator.html#method.filter //! //! # Laziness //! //! Iterators (and iterator [adapters](#adapters)) are *lazy*. This means that //! just creating an iterator doesn't _do_ a whole lot. Nothing really happens //! until you call [`next`]. This is sometimes a source of confusion when //! creating an iterator solely for its side effects. For example, the [`map`] //! method calls a closure on each element it iterates over: //! //! ``` //! # #![allow(unused_must_use)] //! let v = vec![1, 2, 3, 4, 5]; //! v.iter().map(|x| println!("{}", x)); //! ``` //! //! This will not print any values, as we only created an iterator, rather than //! using it. The compiler will warn us about this kind of behavior: //! //! ```text //! warning: unused result which must be used: iterator adaptors are lazy and //! do nothing unless consumed //! ``` //! //! The idiomatic way to write a [`map`] for its side effects is to use a //! `for` loop instead: //! //! ``` //! let v = vec![1, 2, 3, 4, 5]; //! //! for x in &v { //! println!("{}", x); //! } //! ``` //! //! [`map`]: trait.Iterator.html#method.map //! //! The two most common ways to evaluate an iterator are to use a `for` loop //! like this, or using the [`collect`] method to produce a new collection. //! //! [`collect`]: trait.Iterator.html#method.collect //! //! # Infinity //! //! Iterators do not have to be finite. As an example, an open-ended range is //! an infinite iterator: //! //! ``` //! let numbers = 0..; //! ``` //! //! It is common to use the [`take`] iterator adapter to turn an infinite //! iterator into a finite one: //! //! ``` //! let numbers = 0..; //! let five_numbers = numbers.take(5); //! //! for number in five_numbers { //! println!("{}", number); //! } //! ``` //! //! This will print the numbers `0` through `4`, each on their own line. //! //! 
[`take`]: trait.Iterator.html#method.take #![stable(feature = "rust1", since = "1.0.0")] use cmp; use fmt; use iter_private::TrustedRandomAccess; use usize; #[stable(feature = "rust1", since = "1.0.0")] pub use self::iterator::Iterator; #[unstable(feature = "step_trait", reason = "likely to be replaced by finer-grained traits", issue = "42168")] pub use self::range::Step; #[stable(feature = "rust1", since = "1.0.0")] pub use self::sources::{Repeat, repeat}; #[stable(feature = "iter_empty", since = "1.2.0")] pub use self::sources::{Empty, empty}; #[stable(feature = "iter_once", since = "1.2.0")] pub use self::sources::{Once, once}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::traits::{FromIterator, IntoIterator, DoubleEndedIterator, Extend}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::traits::{ExactSizeIterator, Sum, Product}; #[unstable(feature = "fused", issue = "35602")] pub use self::traits::FusedIterator; #[unstable(feature = "trusted_len", issue = "37572")] pub use self::traits::TrustedLen; mod iterator; mod range; mod sources; mod traits; /// A double-ended iterator with the direction inverted. /// /// This `struct` is created by the [`rev`] method on [`Iterator`]. See its /// documentation for more. /// /// [`rev`]: trait.Iterator.html#method.rev /// [`Iterator`]: trait.Iterator.html #[derive(Clone, Debug)] #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] pub struct Rev<T> { iter: T } #[stable(feature = "rust1", since = "1.0.0")] impl<I> Iterator for Rev<I> where I: DoubleEndedIterator { type Item = <I as Iterator>::Item; #[inline] fn next(&mut self) -> Option<<I as Iterator>::Item> { self.iter.next_back() } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } fn fold<Acc, F>(self, init: Acc, f: F) -> Acc where F: FnMut(Acc, Self::Item) -> Acc, { self.iter.rfold(init, f) } #[inline] fn find<P>(&mut self, predicate: P) -> Option<Self::Item> where P: FnMut(&Self::Item) -> bool { self.iter.rfind(predicate) } #[inline] fn rposition<P>(&mut self, predicate: P) -> Option<usize> where P: FnMut(Self::Item) -> bool { self.iter.position(predicate) } } #[stable(feature = "rust1", since = "1.0.0")] impl<I> DoubleEndedIterator for Rev<I> where I: DoubleEndedIterator { #[inline] fn next_back(&mut self) -> Option<<I as Iterator>::Item> { self.iter.next() } fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc where F: FnMut(Acc, Self::Item) -> Acc, { self.iter.fold(init, f) } fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item> where P: FnMut(&Self::Item) -> bool { self.iter.find(predicate) } } #[stable(feature = "rust1", since = "1.0.0")] impl<I> ExactSizeIterator for Rev<I> where I: ExactSizeIterator + DoubleEndedIterator { fn len(&self) -> usize { self.iter.len() } fn is_empty(&self) -> bool { self.iter.is_empty() } } #[unstable(feature = "fused", issue = "35602")] impl<I> FusedIterator for Rev<I> where I: FusedIterator + DoubleEndedIterator {} #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<I> TrustedLen for Rev<I> where I: TrustedLen + DoubleEndedIterator {} /// An iterator that clones the elements of an underlying iterator. /// /// This `struct` is created by the [`cloned`] method on [`Iterator`]. See its /// documentation for more. 
/// /// [`cloned`]: trait.Iterator.html#method.cloned /// [`Iterator`]: trait.Iterator.html #[stable(feature = "iter_cloned", since = "1.1.0")] #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[derive(Clone, Debug)] pub struct Cloned<I> { it: I, } #[stable(feature = "iter_cloned", since = "1.1.0")] impl<'a, I, T: 'a> Iterator for Cloned<I> where I: Iterator<Item=&'a T>, T: Clone { type Item = T; fn next(&mut self) -> Option<T> { self.it.next().cloned() } fn size_hint(&self) -> (usize, Option<usize>) { self.it.size_hint() } fn fold<Acc, F>(self, init: Acc, mut f: F) -> Acc where F: FnMut(Acc, Self::Item) -> Acc, { self.it.fold(init, move |acc, elt| f(acc, elt.clone())) } } #[stable(feature = "iter_cloned", since = "1.1.0")] impl<'a, I, T: 'a> DoubleEndedIterator for Cloned<I> where I: DoubleEndedIterator<Item=&'a T>, T: Clone { fn next_back(&mut self) -> Option<T> { self.it.next_back().cloned() } fn rfold<Acc, F>(self, init: Acc, mut f: F) -> Acc where F: FnMut(Acc, Self::Item) -> Acc, { self.it.rfold(init, move |acc, elt| f(acc, elt.clone())) } } #[stable(feature = "iter_cloned", since = "1.1.0")] impl<'a, I, T: 'a> ExactSizeIterator for Cloned<I> where I: ExactSizeIterator<Item=&'a T>, T: Clone { fn len(&self) -> usize { self.it.len() } fn is_empty(&self) -> bool { self.it.is_empty() } } #[unstable(feature = "fused", issue = "35602")] impl<'a, I, T: 'a> FusedIterator for Cloned<I> where I: FusedIterator<Item=&'a T>, T: Clone {} #[doc(hidden)] unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned<I> where I: TrustedRandomAccess<Item=&'a T>, T: Clone { unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { self.it.get_unchecked(i).clone() } #[inline] fn may_have_side_effect() -> bool { true } } #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<'a, I, T: 'a> TrustedLen for Cloned<I> where I: TrustedLen<Item=&'a T>, T: Clone {} /// An iterator that repeats endlessly. /// /// This `struct` is created by the [`cycle`] method on [`Iterator`]. See its /// documentation for more. /// /// [`cycle`]: trait.Iterator.html#method.cycle /// [`Iterator`]: trait.Iterator.html #[derive(Clone, Debug)] #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] pub struct Cycle<I> { orig: I, iter: I, } #[stable(feature = "rust1", since = "1.0.0")] impl<I> Iterator for Cycle<I> where I: Clone + Iterator { type Item = <I as Iterator>::Item; #[inline] fn next(&mut self) -> Option<<I as Iterator>::Item> { match self.iter.next() { None => { self.iter = self.orig.clone(); self.iter.next() } y => y } } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { // the cycle iterator is either empty or infinite match self.orig.size_hint() { sz @ (0, Some(0)) => sz, (0, _) => (0, None), _ => (usize::MAX, None) } } } #[unstable(feature = "fused", issue = "35602")] impl<I> FusedIterator for Cycle<I> where I: Clone + Iterator {} /// An adapter for stepping iterators by a custom amount. /// /// This `struct` is created by the [`step_by`] method on [`Iterator`]. See /// its documentation for more. 
/// /// [`step_by`]: trait.Iterator.html#method.step_by /// [`Iterator`]: trait.Iterator.html #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[unstable(feature = "iterator_step_by", reason = "unstable replacement of Range::step_by", issue = "27741")] #[derive(Clone, Debug)] pub struct StepBy<I> { iter: I, step: usize, first_take: bool, } #[unstable(feature = "iterator_step_by", reason = "unstable replacement of Range::step_by", issue = "27741")] impl<I> Iterator for StepBy<I> where I: Iterator { type Item = I::Item; #[inline] fn next(&mut self) -> Option<Self::Item> { if self.first_take { self.first_take = false; self.iter.next() } else { self.iter.nth(self.step) } } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let inner_hint = self.iter.size_hint(); if self.first_take { let f = |n| if n == 0 { 0 } else { 1 + (n-1)/(self.step+1) }; (f(inner_hint.0), inner_hint.1.map(f)) } else { let f = |n| n / (self.step+1); (f(inner_hint.0), inner_hint.1.map(f)) } } } // StepBy can only make the iterator shorter, so the len will still fit. #[unstable(feature = "iterator_step_by", reason = "unstable replacement of Range::step_by", issue = "27741")] impl<I> ExactSizeIterator for StepBy<I> where I: ExactSizeIterator {} /// An iterator that strings two iterators together. /// /// This `struct` is created by the [`chain`] method on [`Iterator`]. See its /// documentation for more. /// /// [`chain`]: trait.Iterator.html#method.chain /// [`Iterator`]: trait.Iterator.html #[derive(Clone, Debug)] #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] pub struct Chain<A, B> { a: A, b: B, state: ChainState, } // The iterator protocol specifies that iteration ends with the return value // `None` from `.next()` (or `.next_back()`) and it is unspecified what // further calls return. The chain adaptor must account for this since it uses // two subiterators. // // It uses three states: // // - Both: `a` and `b` are remaining // - Front: `a` remaining // - Back: `b` remaining // // The fourth state (neither iterator is remaining) only occurs after Chain has // returned None once, so we don't need to store this state. #[derive(Clone, Debug)] enum ChainState { // both front and back iterator are remaining Both, // only front is remaining Front, // only back is remaining Back, } #[stable(feature = "rust1", since = "1.0.0")] impl<A, B> Iterator for Chain<A, B> where A: Iterator, B: Iterator<Item = A::Item> { type Item = A::Item; #[inline] fn next(&mut self) -> Option<A::Item> { match self.state { ChainState::Both => match self.a.next() { elt @ Some(..) 
=> elt, None => { self.state = ChainState::Back; self.b.next() } }, ChainState::Front => self.a.next(), ChainState::Back => self.b.next(), } } #[inline] #[rustc_inherit_overflow_checks] fn count(self) -> usize { match self.state { ChainState::Both => self.a.count() + self.b.count(), ChainState::Front => self.a.count(), ChainState::Back => self.b.count(), } } fn fold<Acc, F>(self, init: Acc, mut f: F) -> Acc where F: FnMut(Acc, Self::Item) -> Acc, { let mut accum = init; match self.state { ChainState::Both | ChainState::Front => { accum = self.a.fold(accum, &mut f); } _ => { } } match self.state { ChainState::Both | ChainState::Back => { accum = self.b.fold(accum, &mut f); } _ => { } } accum } #[inline] fn nth(&mut self, mut n: usize) -> Option<A::Item> { match self.state { ChainState::Both | ChainState::Front => { for x in self.a.by_ref() { if n == 0 { return Some(x) } n -= 1; } if let ChainState::Both = self.state { self.state = ChainState::Back; } } ChainState::Back => {} } if let ChainState::Back = self.state { self.b.nth(n) } else { None } } #[inline] fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item> where P: FnMut(&Self::Item) -> bool, { match self.state { ChainState::Both => match self.a.find(&mut predicate) { None => { self.state = ChainState::Back; self.b.find(predicate) } v => v }, ChainState::Front => self.a.find(predicate), ChainState::Back => self.b.find(predicate), } } #[inline] fn last(self) -> Option<A::Item> { match self.state { ChainState::Both => { // Must exhaust a before b. let a_last = self.a.last(); let b_last = self.b.last(); b_last.or(a_last) }, ChainState::Front => self.a.last(), ChainState::Back => self.b.last() } } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let (a_lower, a_upper) = self.a.size_hint(); let (b_lower, b_upper) = self.b.size_hint(); let lower = a_lower.saturating_add(b_lower); let upper = match (a_upper, b_upper) { (Some(x), Some(y)) => x.checked_add(y), _ => None }; (lower, upper) } } #[stable(feature = "rust1", since = "1.0.0")] impl<A, B> DoubleEndedIterator for Chain<A, B> where A: DoubleEndedIterator, B: DoubleEndedIterator<Item=A::Item>, { #[inline] fn next_back(&mut self) -> Option<A::Item> { match self.state { ChainState::Both => match self.b.next_back() { elt @ Some(..) => elt, None => { self.state = ChainState::Front; self.a.next_back() } }, ChainState::Front => self.a.next_back(), ChainState::Back => self.b.next_back(), } } fn rfold<Acc, F>(self, init: Acc, mut f: F) -> Acc where F: FnMut(Acc, Self::Item) -> Acc, { let mut accum = init; match self.state { ChainState::Both | ChainState::Back => { accum = self.b.rfold(accum, &mut f); } _ => { } } match self.state { ChainState::Both | ChainState::Front => { accum = self.a.rfold(accum, &mut f); } _ => { } } accum } } // Note: *both* must be fused to handle double-ended iterators. #[unstable(feature = "fused", issue = "35602")] impl<A, B> FusedIterator for Chain<A, B> where A: FusedIterator, B: FusedIterator<Item=A::Item>, {} #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<A, B> TrustedLen for Chain<A, B> where A: TrustedLen, B: TrustedLen<Item=A::Item>, {} /// An iterator that iterates two other iterators simultaneously. /// /// This `struct` is created by the [`zip`] method on [`Iterator`]. See its /// documentation for more. 
/// /// [`zip`]: trait.Iterator.html#method.zip /// [`Iterator`]: trait.Iterator.html #[derive(Clone, Debug)] #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] pub struct Zip<A, B> { a: A, b: B, // index and len are only used by the specialized version of zip index: usize, len: usize, } #[stable(feature = "rust1", since = "1.0.0")] impl<A, B> Iterator for Zip<A, B> where A: Iterator, B: Iterator { type Item = (A::Item, B::Item); #[inline] fn next(&mut self) -> Option<Self::Item> { ZipImpl::next(self) } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { ZipImpl::size_hint(self) } } #[stable(feature = "rust1", since = "1.0.0")] impl<A, B> DoubleEndedIterator for Zip<A, B> where A: DoubleEndedIterator + ExactSizeIterator, B: DoubleEndedIterator + ExactSizeIterator, { #[inline] fn next_back(&mut self) -> Option<(A::Item, B::Item)> { ZipImpl::next_back(self) } } // Zip specialization trait #[doc(hidden)] trait ZipImpl<A, B> { type Item; fn new(a: A, b: B) -> Self; fn next(&mut self) -> Option<Self::Item>; fn size_hint(&self) -> (usize, Option<usize>); fn next_back(&mut self) -> Option<Self::Item> where A: DoubleEndedIterator + ExactSizeIterator, B: DoubleEndedIterator + ExactSizeIterator; } // General Zip impl #[doc(hidden)] impl<A, B> ZipImpl<A, B> for Zip<A, B> where A: Iterator, B: Iterator { type Item = (A::Item, B::Item); default fn new(a: A, b: B) -> Self { Zip { a, b, index: 0, // unused len: 0, // unused } } #[inline] default fn next(&mut self) -> Option<(A::Item, B::Item)> { self.a.next().and_then(|x| { self.b.next().and_then(|y| { Some((x, y)) }) }) } #[inline] default fn next_back(&mut self) -> Option<(A::Item, B::Item)> where A: DoubleEndedIterator + ExactSizeIterator, B: DoubleEndedIterator + ExactSizeIterator { let a_sz = self.a.len(); let b_sz = self.b.len(); if a_sz != b_sz { // Adjust a, b to equal length if a_sz > b_sz { for _ in 0..a_sz - b_sz { self.a.next_back(); } } else { for _ in 0..b_sz - a_sz { self.b.next_back(); } } } match (self.a.next_back(), self.b.next_back()) { (Some(x), Some(y)) => Some((x, y)), (None, None) => None, _ => unreachable!(), } } #[inline] default fn size_hint(&self) -> (usize, Option<usize>) { let (a_lower, a_upper) = self.a.size_hint(); let (b_lower, b_upper) = self.b.size_hint(); let lower = cmp::min(a_lower, b_lower); let upper = match (a_upper, b_upper) { (Some(x), Some(y)) => Some(cmp::min(x,y)), (Some(x), None) => Some(x), (None, Some(y)) => Some(y), (None, None) => None }; (lower, upper) } } #[doc(hidden)] impl<A, B> ZipImpl<A, B> for Zip<A, B> where A: TrustedRandomAccess, B: TrustedRandomAccess { fn new(a: A, b: B) -> Self { let len = cmp::min(a.len(), b.len()); Zip { a, b, index: 0, len, } } #[inline] fn next(&mut self) -> Option<(A::Item, B::Item)> { if self.index < self.len { let i = self.index; self.index += 1; unsafe { Some((self.a.get_unchecked(i), self.b.get_unchecked(i))) } } else if A::may_have_side_effect() && self.index < self.a.len() { // match the base implementation's potential side effects unsafe { self.a.get_unchecked(self.index); } self.index += 1; None } else { None } } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let len = self.len - self.index; (len, Some(len)) } #[inline] fn next_back(&mut self) -> Option<(A::Item, B::Item)> where A: DoubleEndedIterator + ExactSizeIterator, B: DoubleEndedIterator + ExactSizeIterator { // Adjust a, b to equal length if A::may_have_side_effect() { let sz = self.a.len(); if sz > self.len { for _ in 
0..sz - cmp::max(self.len, self.index) { self.a.next_back(); } } } if B::may_have_side_effect() { let sz = self.b.len(); if sz > self.len { for _ in 0..sz - self.len { self.b.next_back(); } } } if self.index < self.len { self.len -= 1; let i = self.len; unsafe { Some((self.a.get_unchecked(i), self.b.get_unchecked(i))) } } else { None } } } #[stable(feature = "rust1", since = "1.0.0")] impl<A, B> ExactSizeIterator for Zip<A, B> where A: ExactSizeIterator, B: ExactSizeIterator {} #[doc(hidden)] unsafe impl<A, B> TrustedRandomAccess for Zip<A, B> where A: TrustedRandomAccess, B: TrustedRandomAccess, { unsafe fn get_unchecked(&mut self, i: usize) -> (A::Item, B::Item) { (self.a.get_unchecked(i), self.b.get_unchecked(i)) } fn may_have_side_effect() -> bool { A::may_have_side_effect() || B::may_have_side_effect() } } #[unstable(feature = "fused", issue = "35602")] impl<A, B> FusedIterator for Zip<A, B> where A: FusedIterator, B: FusedIterator, {} #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<A, B> TrustedLen for Zip<A, B> where A: TrustedLen, B: TrustedLen, {} /// An iterator that maps the values of `iter` with `f`. /// /// This `struct` is created by the [`map`] method on [`Iterator`]. See its /// documentation for more. /// /// [`map`]: trait.Iterator.html#method.map /// [`Iterator`]: trait.Iterator.html /// /// # Notes about side effects /// /// The [`map`] iterator implements [`DoubleEndedIterator`], meaning that /// you can also [`map`] backwards: /// /// ```rust /// let v: Vec<i32> = vec![1, 2, 3].into_iter().map(|x| x + 1).rev().collect(); /// /// assert_eq!(v, [4, 3, 2]); /// ``` /// /// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html /// /// But if your closure has state, iterating backwards may act in a way you do /// not expect. Let's go through an example. First, in the forward direction: /// /// ```rust /// let mut c = 0; /// /// for pair in vec!['a', 'b', 'c'].into_iter() /// .map(|letter| { c += 1; (letter, c) }) { /// println!("{:?}", pair); /// } /// ``` /// /// This will print "('a', 1), ('b', 2), ('c', 3)". /// /// Now consider this twist where we add a call to `rev`. This version will /// print `('c', 1), ('b', 2), ('a', 3)`. Note that the letters are reversed, /// but the values of the counter still go in order. This is because `map()` is /// still being called lazily on each item, but we are popping items off the /// back of the vector now, instead of shifting them from the front. 
/// /// ```rust /// let mut c = 0; /// /// for pair in vec!['a', 'b', 'c'].into_iter() /// .map(|letter| { c += 1; (letter, c) }) /// .rev() { /// println!("{:?}", pair); /// } /// ``` #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] #[derive(Clone)] pub struct Map<I, F> { iter: I, f: F, } #[stable(feature = "core_impl_debug", since = "1.9.0")] impl<I: fmt::Debug, F> fmt::Debug for Map<I, F> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Map") .field("iter", &self.iter) .finish() } } #[stable(feature = "rust1", since = "1.0.0")] impl<B, I: Iterator, F> Iterator for Map<I, F> where F: FnMut(I::Item) -> B { type Item = B; #[inline] fn next(&mut self) -> Option<B> { self.iter.next().map(&mut self.f) } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } fn fold<Acc, G>(self, init: Acc, mut g: G) -> Acc where G: FnMut(Acc, Self::Item) -> Acc, { let mut f = self.f; self.iter.fold(init, move |acc, elt| g(acc, f(elt))) } } #[stable(feature = "rust1", since = "1.0.0")] impl<B, I: DoubleEndedIterator, F> DoubleEndedIterator for Map<I, F> where F: FnMut(I::Item) -> B, { #[inline] fn next_back(&mut self) -> Option<B> { self.iter.next_back().map(&mut self.f) } fn rfold<Acc, G>(self, init: Acc, mut g: G) -> Acc where G: FnMut(Acc, Self::Item) -> Acc, { let mut f = self.f; self.iter.rfold(init, move |acc, elt| g(acc, f(elt))) } } #[stable(feature = "rust1", since = "1.0.0")] impl<B, I: ExactSizeIterator, F> ExactSizeIterator for Map<I, F> where F: FnMut(I::Item) -> B { fn len(&self) -> usize { self.iter.len() } fn is_empty(&self) -> bool { self.iter.is_empty() } } #[unstable(feature = "fused", issue = "35602")] impl<B, I: FusedIterator, F> FusedIterator for Map<I, F> where F: FnMut(I::Item) -> B {} #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<B, I, F> TrustedLen for Map<I, F> where I: TrustedLen, F: FnMut(I::Item) -> B {} #[doc(hidden)] unsafe impl<B, I, F> TrustedRandomAccess for Map<I, F> where I: TrustedRandomAccess, F: FnMut(I::Item) -> B, { unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { (self.f)(self.iter.get_unchecked(i)) } #[inline] fn may_have_side_effect() -> bool { true } } /// An iterator that filters the elements of `iter` with `predicate`. /// /// This `struct` is created by the [`filter`] method on [`Iterator`]. See its /// documentation for more. /// /// [`filter`]: trait.Iterator.html#method.filter /// [`Iterator`]: trait.Iterator.html #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] #[derive(Clone)] pub struct Filter<I, P> { iter: I, predicate: P, } #[stable(feature = "core_impl_debug", since = "1.9.0")] impl<I: fmt::Debug, P> fmt::Debug for Filter<I, P> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Filter") .field("iter", &self.iter) .finish() } } #[stable(feature = "rust1", since = "1.0.0")] impl<I: Iterator, P> Iterator for Filter<I, P> where P: FnMut(&I::Item) -> bool { type Item = I::Item; #[inline] fn next(&mut self) -> Option<I::Item> { for x in &mut self.iter { if (self.predicate)(&x) { return Some(x); } } None } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let (_, upper) = self.iter.size_hint(); (0, upper) // can't know a lower bound, due to the predicate } // this special case allows the compiler to make `.filter(_).count()` // branchless. 
Barring perfect branch prediction (which is unattainable in // the general case), this will be much faster in >90% of cases (containing // virtually all real workloads) and only a tiny bit slower in the rest. // // Having this specialization thus allows us to write `.filter(p).count()` // where we would otherwise write `.map(|x| p(x) as usize).sum()`, which is // less readable and also less backwards-compatible to Rust before 1.10. // // Using the branchless version will also simplify the LLVM byte code, thus // leaving more budget for LLVM optimizations. #[inline] fn count(mut self) -> usize { let mut count = 0; for x in &mut self.iter { count += (self.predicate)(&x) as usize; } count } } #[stable(feature = "rust1", since = "1.0.0")] impl<I: DoubleEndedIterator, P> DoubleEndedIterator for Filter<I, P> where P: FnMut(&I::Item) -> bool, { #[inline] fn next_back(&mut self) -> Option<I::Item> { for x in self.iter.by_ref().rev() { if (self.predicate)(&x) { return Some(x); } } None } } #[unstable(feature = "fused", issue = "35602")] impl<I: FusedIterator, P> FusedIterator for Filter<I, P> where P: FnMut(&I::Item) -> bool {} /// An iterator that uses `f` to both filter and map elements from `iter`. /// /// This `struct` is created by the [`filter_map`] method on [`Iterator`]. See its /// documentation for more. /// /// [`filter_map`]: trait.Iterator.html#method.filter_map /// [`Iterator`]: trait.Iterator.html #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] #[derive(Clone)] pub struct FilterMap<I, F> { iter: I, f: F, } #[stable(feature = "core_impl_debug", since = "1.9.0")] impl<I: fmt::Debug, F> fmt::Debug for FilterMap<I, F> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("FilterMap") .field("iter", &self.iter) .finish() } } #[stable(feature = "rust1", since = "1.0.0")] impl<B, I: Iterator, F> Iterator for FilterMap<I, F> where F: FnMut(I::Item) -> Option<B>, { type Item = B; #[inline] fn next(&mut self) -> Option<B> { for x in self.iter.by_ref() { if let Some(y) = (self.f)(x) { return Some(y); } } None } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let (_, upper) = self.iter.size_hint(); (0, upper) // can't know a lower bound, due to the predicate } } #[stable(feature = "rust1", since = "1.0.0")] impl<B, I: DoubleEndedIterator, F> DoubleEndedIterator for FilterMap<I, F> where F: FnMut(I::Item) -> Option<B>, { #[inline] fn next_back(&mut self) -> Option<B> { for x in self.iter.by_ref().rev() { if let Some(y) = (self.f)(x) { return Some(y); } } None } } #[unstable(feature = "fused", issue = "35602")] impl<B, I: FusedIterator, F> FusedIterator for FilterMap<I, F> where F: FnMut(I::Item) -> Option<B> {} /// An iterator that yields the current count and the element during iteration. /// /// This `struct` is created by the [`enumerate`] method on [`Iterator`]. See its /// documentation for more. 
/// /// [`enumerate`]: trait.Iterator.html#method.enumerate /// [`Iterator`]: trait.Iterator.html #[derive(Clone, Debug)] #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] pub struct Enumerate<I> { iter: I, count: usize, } #[stable(feature = "rust1", since = "1.0.0")] impl<I> Iterator for Enumerate<I> where I: Iterator { type Item = (usize, <I as Iterator>::Item); /// # Overflow Behavior /// /// The method does no guarding against overflows, so enumerating more than /// `usize::MAX` elements either produces the wrong result or panics. If /// debug assertions are enabled, a panic is guaranteed. /// /// # Panics /// /// Might panic if the index of the element overflows a `usize`. #[inline] #[rustc_inherit_overflow_checks] fn next(&mut self) -> Option<(usize, <I as Iterator>::Item)> { self.iter.next().map(|a| { let ret = (self.count, a); // Possible undefined overflow. self.count += 1; ret }) } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } #[inline] #[rustc_inherit_overflow_checks] fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> { self.iter.nth(n).map(|a| { let i = self.count + n; self.count = i + 1; (i, a) }) } #[inline] fn count(self) -> usize { self.iter.count() } } #[stable(feature = "rust1", since = "1.0.0")] impl<I> DoubleEndedIterator for Enumerate<I> where I: ExactSizeIterator + DoubleEndedIterator { #[inline] fn next_back(&mut self) -> Option<(usize, <I as Iterator>::Item)> { self.iter.next_back().map(|a| { let len = self.iter.len(); // Can safely add, `ExactSizeIterator` promises that the number of // elements fits into a `usize`. (self.count + len, a) }) } } #[stable(feature = "rust1", since = "1.0.0")] impl<I> ExactSizeIterator for Enumerate<I> where I: ExactSizeIterator { fn len(&self) -> usize { self.iter.len() } fn is_empty(&self) -> bool { self.iter.is_empty() } } #[doc(hidden)] unsafe impl<I> TrustedRandomAccess for Enumerate<I> where I: TrustedRandomAccess { unsafe fn get_unchecked(&mut self, i: usize) -> (usize, I::Item) { (self.count + i, self.iter.get_unchecked(i)) } fn
() -> bool { I::may_have_side_effect() } } #[unstable(feature = "fused", issue = "35602")] impl<I> FusedIterator for Enumerate<I> where I: FusedIterator {} #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<I> TrustedLen for Enumerate<I> where I: TrustedLen, {} /// An iterator with a `peek()` that returns an optional reference to the next /// element. /// /// This `struct` is created by the [`peekable`] method on [`Iterator`]. See its /// documentation for more. /// /// [`peekable`]: trait.Iterator.html#method.peekable /// [`Iterator`]: trait.Iterator.html #[derive(Clone, Debug)] #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] pub struct Peekable<I: Iterator> { iter: I, /// Remember a peeked value, even if it was None. peeked: Option<Option<I::Item>>, } // Peekable must remember if a None has been seen in the `.peek()` method. // It ensures that `.peek(); .peek();` or `.peek(); .next();` only advances the // underlying iterator at most once. This does not by itself make the iterator // fused. #[stable(feature = "rust1", since = "1.0.0")] impl<I: Iterator> Iterator for Peekable<I> { type Item = I::Item; #[inline] fn next(&mut self) -> Option<I::Item> { match self.peeked.take() { Some(v) => v, None => self.iter.next(), } } #[inline] #[rustc_inherit_overflow_checks] fn count(mut self) -> usize { match self.peeked.take() { Some(None) => 0, Some(Some(_)) => 1 + self.iter.count(), None => self.iter.count(), } } #[inline] fn nth(&mut self, n: usize) -> Option<I::Item> { match self.peeked.take() { // the .take() below is just to avoid "move into pattern guard" Some(ref mut v) if n == 0 => v.take(), Some(None) => None, Some(Some(_)) => self.iter.nth(n - 1), None => self.iter.nth(n), } } #[inline] fn last(mut self) -> Option<I::Item> { let peek_opt = match self.peeked.take() { Some(None) => return None, Some(v) => v, None => None, }; self.iter.last().or(peek_opt) } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let peek_len = match self.peeked { Some(None) => return (0, Some(0)), Some(Some(_)) => 1, None => 0, }; let (lo, hi) = self.iter.size_hint(); let lo = lo.saturating_add(peek_len); let hi = hi.and_then(|x| x.checked_add(peek_len)); (lo, hi) } } #[stable(feature = "rust1", since = "1.0.0")] impl<I: ExactSizeIterator> ExactSizeIterator for Peekable<I> {} #[unstable(feature = "fused", issue = "35602")] impl<I: FusedIterator> FusedIterator for Peekable<I> {} impl<I: Iterator> Peekable<I> { /// Returns a reference to the next() value without advancing the iterator. /// /// Like [`next`], if there is a value, it is wrapped in a `Some(T)`. /// But if the iteration is over, `None` is returned. /// /// [`next`]: trait.Iterator.html#tymethod.next /// /// Because `peek()` returns a reference, and many iterators iterate over /// references, there can be a possibly confusing situation where the /// return value is a double reference. You can see this effect in the /// examples below. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// let xs = [1, 2, 3]; /// /// let mut iter = xs.iter().peekable(); /// /// // peek() lets us see into the future /// assert_eq!(iter.peek(), Some(&&1)); /// assert_eq!(iter.next(), Some(&1)); /// /// assert_eq!(iter.next(), Some(&2)); /// /// // The iterator does not advance even if we `peek` multiple times /// assert_eq!(iter.peek(), Some(&&3)); /// assert_eq!(iter.peek(), Some(&&3)); /// /// assert_eq!(iter.next(), Some(&3)); /// /// // After the iterator is finished, so is `peek()` /// assert_eq!(iter.peek(), None); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn peek(&mut self) -> Option<&I::Item> { if self.peeked.is_none() { self.peeked = Some(self.iter.next()); } match self.peeked { Some(Some(ref value)) => Some(value), Some(None) => None, _ => unreachable!(), } } } /// An iterator that rejects elements while `predicate` is true. /// /// This `struct` is created by the [`skip_while`] method on [`Iterator`]. See its /// documentation for more. /// /// [`skip_while`]: trait.Iterator.html#method.skip_while /// [`Iterator`]: trait.Iterator.html #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] #[derive(Clone)] pub struct SkipWhile<I, P> { iter: I, flag: bool, predicate: P, } #[stable(feature = "core_impl_debug", since = "1.9.0")] impl<I: fmt::Debug, P> fmt::Debug for SkipWhile<I, P> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SkipWhile") .field("iter", &self.iter) .field("flag", &self.flag) .finish() } } #[stable(feature = "rust1", since = "1.0.0")] impl<I: Iterator, P> Iterator for SkipWhile<I, P> where P: FnMut(&I::Item) -> bool { type Item = I::Item; #[inline] fn next(&mut self) -> Option<I::Item> { for x in self.iter.by_ref() { if self.flag || !(self.predicate)(&x) { self.flag = true; return Some(x); } } None } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let (_, upper) = self.iter.size_hint(); (0, upper) // can't know a lower bound, due to the predicate } } #[unstable(feature = "fused", issue = "35602")] impl<I, P> FusedIterator for SkipWhile<I, P> where I: FusedIterator, P: FnMut(&I::Item) -> bool {} /// An iterator that only accepts elements while `predicate` is true. /// /// This `struct` is created by the [`take_while`] method on [`Iterator`]. See its /// documentation for more. 
/// /// [`take_while`]: trait.Iterator.html#method.take_while /// [`Iterator`]: trait.Iterator.html #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] #[derive(Clone)] pub struct TakeWhile<I, P> { iter: I, flag: bool, predicate: P, } #[stable(feature = "core_impl_debug", since = "1.9.0")] impl<I: fmt::Debug, P> fmt::Debug for TakeWhile<I, P> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("TakeWhile") .field("iter", &self.iter) .field("flag", &self.flag) .finish() } } #[stable(feature = "rust1", since = "1.0.0")] impl<I: Iterator, P> Iterator for TakeWhile<I, P> where P: FnMut(&I::Item) -> bool { type Item = I::Item; #[inline] fn next(&mut self) -> Option<I::Item> { if self.flag { None } else { self.iter.next().and_then(|x| { if (self.predicate)(&x) { Some(x) } else { self.flag = true; None } }) } } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let (_, upper) = self.iter.size_hint(); (0, upper) // can't know a lower bound, due to the predicate } } #[unstable(feature = "fused", issue = "35602")] impl<I, P> FusedIterator for TakeWhile<I, P> where I: FusedIterator, P: FnMut(&I::Item) -> bool {} /// An iterator that skips over `n` elements of `iter`. /// /// This `struct` is created by the [`skip`] method on [`Iterator`]. See its /// documentation for more. /// /// [`skip`]: trait.Iterator.html#method.skip /// [`Iterator`]: trait.Iterator.html #[derive(Clone, Debug)] #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] pub struct Skip<I> { iter: I, n: usize } #[stable(feature = "rust1", since = "1.0.0")] impl<I> Iterator for Skip<I> where I: Iterator { type Item = <I as Iterator>::Item; #[inline] fn next(&mut self) -> Option<I::Item> { if self.n == 0 { self.iter.next() } else { let old_n = self.n; self.n = 0; self.iter.nth(old_n) } } #[inline] fn nth(&mut self, n: usize) -> Option<I::Item> { // Can't just add n + self.n due to overflow. if self.n == 0 { self.iter.nth(n) } else { let to_skip = self.n; self.n = 0; // nth(n) skips n+1 if self.iter.nth(to_skip-1).is_none() { return None; } self.iter.nth(n) } } #[inline] fn count(self) -> usize { self.iter.count().saturating_sub(self.n) } #[inline] fn last(mut self) -> Option<I::Item> { if self.n == 0 { self.iter.last() } else { let next = self.next(); if next.is_some() { // recurse. n should be 0. self.last().or(next) } else { None } } } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let (lower, upper) = self.iter.size_hint(); let lower = lower.saturating_sub(self.n); let upper = upper.map(|x| x.saturating_sub(self.n)); (lower, upper) } } #[stable(feature = "rust1", since = "1.0.0")] impl<I> ExactSizeIterator for Skip<I> where I: ExactSizeIterator {} #[stable(feature = "double_ended_skip_iterator", since = "1.9.0")] impl<I> DoubleEndedIterator for Skip<I> where I: DoubleEndedIterator + ExactSizeIterator { fn next_back(&mut self) -> Option<Self::Item> { if self.len() > 0 { self.iter.next_back() } else { None } } } #[unstable(feature = "fused", issue = "35602")] impl<I> FusedIterator for Skip<I> where I: FusedIterator {} /// An iterator that only iterates over the first `n` iterations of `iter`. /// /// This `struct` is created by the [`take`] method on [`Iterator`]. See its /// documentation for more. 
/// /// [`take`]: trait.Iterator.html#method.take /// [`Iterator`]: trait.Iterator.html #[derive(Clone, Debug)] #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] pub struct Take<I> { iter: I, n: usize } #[stable(feature = "rust1", since = "1.0.0")] impl<I> Iterator for Take<I> where I: Iterator{ type Item = <I as Iterator>::Item; #[inline] fn next(&mut self) -> Option<<I as Iterator>::Item> { if self.n != 0 { self.n -= 1; self.iter.next() } else { None } } #[inline] fn nth(&mut self, n: usize) -> Option<I::Item> { if self.n > n { self.n -= n + 1; self.iter.nth(n) } else { if self.n > 0 { self.iter.nth(self.n - 1); self.n = 0; } None } } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let (lower, upper) = self.iter.size_hint(); let lower = cmp::min(lower, self.n); let upper = match upper { Some(x) if x < self.n => Some(x), _ => Some(self.n) }; (lower, upper) } } #[stable(feature = "rust1", since = "1.0.0")] impl<I> ExactSizeIterator for Take<I> where I: ExactSizeIterator {} #[unstable(feature = "fused", issue = "35602")] impl<I> FusedIterator for Take<I> where I: FusedIterator {} /// An iterator to maintain state while iterating another iterator. /// /// This `struct` is created by the [`scan`] method on [`Iterator`]. See its /// documentation for more. /// /// [`scan`]: trait.Iterator.html#method.scan /// [`Iterator`]: trait.Iterator.html #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] #[derive(Clone)] pub struct Scan<I, St, F> { iter: I, f: F, state: St, } #[stable(feature = "core_impl_debug", since = "1.9.0")] impl<I: fmt::Debug, St: fmt::Debug, F> fmt::Debug for Scan<I, St, F> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Scan") .field("iter", &self.iter) .field("state", &self.state) .finish() } } #[stable(feature = "rust1", since = "1.0.0")] impl<B, I, St, F> Iterator for Scan<I, St, F> where I: Iterator, F: FnMut(&mut St, I::Item) -> Option<B>, { type Item = B; #[inline] fn next(&mut self) -> Option<B> { self.iter.next().and_then(|a| (self.f)(&mut self.state, a)) } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let (_, upper) = self.iter.size_hint(); (0, upper) // can't know a lower bound, due to the scan function } } /// An iterator that maps each element to an iterator, and yields the elements /// of the produced iterators. /// /// This `struct` is created by the [`flat_map`] method on [`Iterator`]. See its /// documentation for more. 
/// /// [`flat_map`]: trait.Iterator.html#method.flat_map /// [`Iterator`]: trait.Iterator.html #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] #[derive(Clone)] pub struct FlatMap<I, U: IntoIterator, F> { iter: I, f: F, frontiter: Option<U::IntoIter>, backiter: Option<U::IntoIter>, } #[stable(feature = "core_impl_debug", since = "1.9.0")] impl<I: fmt::Debug, U: IntoIterator, F> fmt::Debug for FlatMap<I, U, F> where U::IntoIter: fmt::Debug { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("FlatMap") .field("iter", &self.iter) .field("frontiter", &self.frontiter) .field("backiter", &self.backiter) .finish() } } #[stable(feature = "rust1", since = "1.0.0")] impl<I: Iterator, U: IntoIterator, F> Iterator for FlatMap<I, U, F> where F: FnMut(I::Item) -> U, { type Item = U::Item; #[inline] fn next(&mut self) -> Option<U::Item> { loop { if let Some(ref mut inner) = self.frontiter { if let Some(x) = inner.by_ref().next() { return Some(x) } } match self.iter.next().map(&mut self.f) { None => return self.backiter.as_mut().and_then(|it| it.next()), next => self.frontiter = next.map(IntoIterator::into_iter), } } } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let (flo, fhi) = self.frontiter.as_ref().map_or((0, Some(0)), |it| it.size_hint()); let (blo, bhi) = self.backiter.as_ref().map_or((0, Some(0)), |it| it.size_hint()); let lo = flo.saturating_add(blo); match (self.iter.size_hint(), fhi, bhi) { ((0, Some(0)), Some(a), Some(b)) => (lo, a.checked_add(b)), _ => (lo, None) } } #[inline] fn fold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc where Fold: FnMut(Acc, Self::Item) -> Acc, { self.frontiter.into_iter() .chain(self.iter.map(self.f).map(U::into_iter)) .chain(self.backiter) .fold(init, |acc, iter| iter.fold(acc, &mut fold)) } } #[stable(feature = "rust1", since = "1.0.0")] impl<I: DoubleEndedIterator, U, F> DoubleEndedIterator for FlatMap<I, U, F> where F: FnMut(I::Item) -> U, U: IntoIterator, U::IntoIter: DoubleEndedIterator { #[inline] fn next_back(&mut self) -> Option<U::Item> { loop { if let Some(ref mut inner) = self.backiter { if let Some(y) = inner.next_back() { return Some(y) } } match self.iter.next_back().map(&mut self.f) { None => return self.frontiter.as_mut().and_then(|it| it.next_back()), next => self.backiter = next.map(IntoIterator::into_iter), } } } } #[unstable(feature = "fused", issue = "35602")] impl<I, U, F> FusedIterator for FlatMap<I, U, F> where I: FusedIterator, U: IntoIterator, F: FnMut(I::Item) -> U {} /// An iterator that yields `None` forever after the underlying iterator /// yields `None` once. /// /// This `struct` is created by the [`fuse`] method on [`Iterator`]. See its /// documentation for more. 
/// /// [`fuse`]: trait.Iterator.html#method.fuse /// [`Iterator`]: trait.Iterator.html #[derive(Clone, Debug)] #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] pub struct Fuse<I> { iter: I, done: bool } #[unstable(feature = "fused", issue = "35602")] impl<I> FusedIterator for Fuse<I> where I: Iterator {} #[stable(feature = "rust1", since = "1.0.0")] impl<I> Iterator for Fuse<I> where I: Iterator { type Item = <I as Iterator>::Item; #[inline] default fn next(&mut self) -> Option<<I as Iterator>::Item> { if self.done { None } else { let next = self.iter.next(); self.done = next.is_none(); next } } #[inline] default fn nth(&mut self, n: usize) -> Option<I::Item> { if self.done { None } else { let nth = self.iter.nth(n); self.done = nth.is_none(); nth } } #[inline] default fn last(self) -> Option<I::Item> { if self.done { None } else { self.iter.last() } } #[inline] default fn count(self) -> usize { if self.done { 0 } else { self.iter.count() } } #[inline] default fn size_hint(&self) -> (usize, Option<usize>) { if self.done { (0, Some(0)) } else { self.iter.size_hint() } } } #[stable(feature = "rust1", since = "1.0.0")] impl<I> DoubleEndedIterator for Fuse<I> where I: DoubleEndedIterator { #[inline] default fn next_back(&mut self) -> Option<<I as Iterator>::Item> { if self.done { None } else { let next = self.iter.next_back(); self.done = next.is_none(); next } } } unsafe impl<I> TrustedRandomAccess for Fuse<I> where I: TrustedRandomAccess, { unsafe fn get_unchecked(&mut self, i: usize) -> I::Item { self.iter.get_unchecked(i) } fn may_have_side_effect() -> bool { I::may_have_side_effect() } } #[unstable(feature = "fused", issue = "35602")] impl<I> Iterator for Fuse<I> where I: FusedIterator { #[inline] fn next(&mut self) -> Option<<I as Iterator>::Item> { self.iter.next() } #[inline] fn nth(&mut self, n: usize) -> Option<I::Item> { self.iter.nth(n) } #[inline] fn last(self) -> Option<I::Item> { self.iter.last() } #[inline] fn count(self) -> usize { self.iter.count() } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } } #[unstable(feature = "fused", reason = "recently added", issue = "35602")] impl<I> DoubleEndedIterator for Fuse<I> where I: DoubleEndedIterator + FusedIterator { #[inline] fn next_back(&mut self) -> Option<<I as Iterator>::Item> { self.iter.next_back() } } #[stable(feature = "rust1", since = "1.0.0")] impl<I> ExactSizeIterator for Fuse<I> where I: ExactSizeIterator { fn len(&self) -> usize { self.iter.len() } fn is_empty(&self) -> bool { self.iter.is_empty() } } /// An iterator that calls a function with a reference to each element before /// yielding it. /// /// This `struct` is created by the [`inspect`] method on [`Iterator`]. See its /// documentation for more. 
/// /// [`inspect`]: trait.Iterator.html#method.inspect /// [`Iterator`]: trait.Iterator.html #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] #[derive(Clone)] pub struct Inspect<I, F> { iter: I, f: F, } #[stable(feature = "core_impl_debug", since = "1.9.0")] impl<I: fmt::Debug, F> fmt::Debug for Inspect<I, F> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Inspect") .field("iter", &self.iter) .finish() } } impl<I: Iterator, F> Inspect<I, F> where F: FnMut(&I::Item) { #[inline] fn do_inspect(&mut self, elt: Option<I::Item>) -> Option<I::Item> { if let Some(ref a) = elt { (self.f)(a); } elt } } #[stable(feature = "rust1", since = "1.0.0")] impl<I: Iterator, F> Iterator for Inspect<I, F> where F: FnMut(&I::Item) { type Item = I::Item; #[inline] fn next(&mut self) -> Option<I::Item> { let next = self.iter.next(); self.do_inspect(next) } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } } #[stable(feature = "rust1", since = "1.0.0")] impl<I: DoubleEndedIterator, F> DoubleEndedIterator for Inspect<I, F> where F: FnMut(&I::Item), { #[inline] fn next_back(&mut self) -> Option<I::Item> { let next = self.iter.next_back(); self.do_inspect(next) } } #[stable(feature = "rust1", since = "1.0.0")] impl<I: ExactSizeIterator, F> ExactSizeIterator for Inspect<I, F> where F: FnMut(&I::Item) { fn len(&self) -> usize { self.iter.len() } fn is_empty(&self) -> bool { self.iter.is_empty() } } #[unstable(feature = "fused", issue = "35602")] impl<I: FusedIterator, F> FusedIterator for Inspect<I, F> where F: FnMut(&I::Item) {}
may_have_side_effect
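// Editor's note: the three adapters defined above (FlatMap, Fuse, Inspect) are easiest to
// see from the caller's side. The following standalone sketch is editor-added and uses only
// the public std methods whose implementations appear above; it is an illustration, not part
// of the original source.
fn main() {
    // flat_map: map each item to an iterator, then yield the flattened items.
    let letters: Vec<char> = ["hi", "yo"].iter().flat_map(|s| s.chars()).collect();
    assert_eq!(letters, ['h', 'i', 'y', 'o']);

    // fuse: after the first None, the adapter keeps returning None,
    // regardless of what the underlying iterator would do next.
    let mut it = (0..2).fuse();
    assert_eq!(it.next(), Some(0));
    assert_eq!(it.next(), Some(1));
    assert_eq!(it.next(), None);
    assert_eq!(it.next(), None); // still None, guaranteed by Fuse

    // inspect: observe each element by reference as it passes through the chain.
    let sum: i32 = [1, 2, 3]
        .iter()
        .inspect(|x| println!("about to add {}", x))
        .sum();
    assert_eq!(sum, 6);
}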
request_1_6_test.go
// +build go1.6

package request_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/endpoints"
)

// go version 1.4 and 1.5 do not return an error. Version 1.5 will url encode
// the uri while 1.4 will not
func TestRequestInvalidEndpoint(t *testing.T) {
	endpoint, _ := endpoints.NormalizeEndpoint("localhost:80 ", "test-service", "test-region", false)

	r := request.New(
		aws.Config{},
		metadata.ClientInfo{Endpoint: endpoint},
		defaults.Handlers(),
		client.DefaultRetryer{},
		&request.Operation{},
		nil,
		nil,
	)

	assert.Error(t, r.Error)
}
aead_aes_256_cbc_hmac_sha256.go
package algorithms

import (
	"bytes"
	"fmt"

	"github.com/swisscom/mssql-always-encrypted/pkg/crypto"
	"github.com/swisscom/mssql-always-encrypted/pkg/encryption"
	"github.com/swisscom/mssql-always-encrypted/pkg/keys"
)

// https://tools.ietf.org/html/draft-mcgrew-aead-aes-cbc-hmac-sha2-05
// https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-TDS/%5bMS-TDS%5d.pdf

var _ Algorithm = &AeadAes256CbcHmac256Algorithm{}

type AeadAes256CbcHmac256Algorithm struct {
	algorithmVersion                        byte
	deterministic                           bool
	blockSizeBytes                          int
	keySizeBytes                            int
	minimumCipherTextLengthBytesNoAuthTag   int
	minimumCipherTextLengthBytesWithAuthTag int
	cek                                     keys.AeadAes256CbcHmac256
	version                                 []byte
	versionSize                             []byte
}

func NewAeadAes256CbcHmac256Algorithm(key keys.AeadAes256CbcHmac256, encType encryption.Type, algorithmVersion byte) AeadAes256CbcHmac256Algorithm {
	const keySizeBytes = 256 / 8
	const blockSizeBytes = 16
	const minimumCipherTextLengthBytesNoAuthTag = 1 + 2*blockSizeBytes
	const minimumCipherTextLengthBytesWithAuthTag = minimumCipherTextLengthBytesNoAuthTag + keySizeBytes

	a := AeadAes256CbcHmac256Algorithm{
		algorithmVersion: algorithmVersion,
		deterministic:    encType.Deterministic,
		blockSizeBytes:   blockSizeBytes,
		keySizeBytes:     keySizeBytes,
		cek:              key,
		minimumCipherTextLengthBytesNoAuthTag:   minimumCipherTextLengthBytesNoAuthTag,
		minimumCipherTextLengthBytesWithAuthTag: minimumCipherTextLengthBytesWithAuthTag,
		version:     []byte{0x01},
		versionSize: []byte{1},
	}

	a.version[0] = algorithmVersion
	return a
}

func (a *AeadAes256CbcHmac256Algorithm) Encrypt(bytes []byte) ([]byte, error) {
	panic("implement me")
}

func (a *AeadAes256CbcHmac256Algorithm) Decrypt(ciphertext []byte) ([]byte, error) {
	// This algorithm always has the auth tag!
	minimumCiphertextLength := a.minimumCipherTextLengthBytesWithAuthTag
	if len(ciphertext) < minimumCiphertextLength {
		return nil, fmt.Errorf("invalid ciphertext length: at least %v bytes expected", minimumCiphertextLength)
	}

	idx := 0
	if ciphertext[idx] != a.algorithmVersion {
		return nil, fmt.Errorf("invalid algorithm version used: %v found but %v expected", ciphertext[idx], a.algorithmVersion)
	}

	idx++
	authTag := ciphertext[idx : idx+a.keySizeBytes]
	idx += a.keySizeBytes

	iv := ciphertext[idx : idx+a.blockSizeBytes]
	idx += len(iv)

	realCiphertext := ciphertext[idx:]
	ourAuthTag := a.prepareAuthTag(iv, realCiphertext)

	if bytes.Compare(ourAuthTag, authTag) != 0 {
		return nil, fmt.Errorf("invalid auth tag")
	}

	// decrypt
	aescdbc := crypto.NewAESCbcPKCS5(a.cek.EncryptionKey(), iv)
	cleartext := aescdbc.Decrypt(realCiphertext)

	return cleartext, nil
}

func (a *AeadAes256CbcHmac256Algorithm) prepareAuthTag(iv []byte, ciphertext []byte) []byte {
	var input = make([]byte, 0)
	input = append(input, a.algorithmVersion)
	input = append(input, iv...)
	input = append(input, ciphertext...)
	input = append(input, a.versionSize...)

	return crypto.Sha256Hmac(input, a.cek.MacKey())
}
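// Editor's illustrative sketch (not part of the Go package above): Decrypt walks a fixed
// byte layout for AEAD_AES_256_CBC_HMAC_SHA256 ciphertexts,
//   [ 1 version byte | 32-byte HMAC-SHA256 auth tag | 16-byte IV | AES-CBC data ]
// and the tag itself is computed over version || IV || data || versionSize. The sizes below
// come from the constructor above; the function and constant names are hypothetical. As an
// aside, MAC comparison is ideally done in constant time (e.g. Go's hmac.Equal) rather than
// with bytes.Compare.
const TAG_LEN: usize = 32; // HMAC-SHA256 output, == keySizeBytes above
const IV_LEN: usize = 16; // AES block size, == blockSizeBytes above
const MIN_LEN: usize = 1 + TAG_LEN + 2 * IV_LEN; // version + tag + IV + at least one block

/// Split a raw ciphertext into (version, tag, iv, data) without copying.
fn split_ciphertext(buf: &[u8]) -> Option<(u8, &[u8], &[u8], &[u8])> {
    if buf.len() < MIN_LEN {
        return None;
    }
    let version = buf[0];
    let tag = &buf[1..1 + TAG_LEN];
    let iv = &buf[1 + TAG_LEN..1 + TAG_LEN + IV_LEN];
    let data = &buf[1 + TAG_LEN + IV_LEN..];
    Some((version, tag, iv, data))
}

fn main() {
    // A dummy buffer: 1 version byte + 32-byte tag + 16-byte IV + two 16-byte blocks.
    let buf = vec![0u8; 1 + TAG_LEN + IV_LEN + 32];
    let (version, tag, iv, data) = split_ciphertext(&buf).unwrap();
    assert_eq!(version, 0);
    assert_eq!((tag.len(), iv.len(), data.len()), (32, 16, 32));
}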
build.rs
//! Tests for the `cargo build` command. use cargo::{ core::compiler::CompileMode, core::{Shell, Workspace}, ops::CompileOptions, Config, }; use cargo_test_support::compare; use cargo_test_support::paths::{root, CargoPathExt}; use cargo_test_support::registry::Package; use cargo_test_support::tools; use cargo_test_support::{ basic_bin_manifest, basic_lib_manifest, basic_manifest, cargo_exe, git, is_nightly, main_file, paths, process, project, rustc_host, sleep_ms, symlink_supported, t, Execs, ProjectBuilder, }; use cargo_util::paths::dylib_path_envvar; use std::env; use std::fs; use std::io::Read; use std::process::Stdio; #[cargo_test] fn cargo_compile_simple() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("build").run(); assert!(p.bin("foo").is_file()); p.process(&p.bin("foo")).with_stdout("i am foo\n").run(); } #[cargo_test] fn cargo_fail_with_no_stderr() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &String::from("refusal")) .build(); p.cargo("build --message-format=json") .with_status(101) .with_stderr_does_not_contain("--- stderr") .run(); } /// Checks that the `CARGO_INCREMENTAL` environment variable results in /// `rustc` getting `-C incremental` passed to it. #[cargo_test] fn cargo_compile_incremental() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("build -v") .env("CARGO_INCREMENTAL", "1") .with_stderr_contains( "[RUNNING] `rustc [..] -C incremental=[..]/target/debug/incremental[..]`\n", ) .run(); p.cargo("test -v") .env("CARGO_INCREMENTAL", "1") .with_stderr_contains( "[RUNNING] `rustc [..] -C incremental=[..]/target/debug/incremental[..]`\n", ) .run(); } #[cargo_test] fn incremental_profile() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" authors = [] [profile.dev] incremental = false [profile.release] incremental = true "#, ) .file("src/main.rs", "fn main() {}") .build(); p.cargo("build -v") .env_remove("CARGO_INCREMENTAL") .with_stderr_does_not_contain("[..]C incremental=[..]") .run(); p.cargo("build -v") .env("CARGO_INCREMENTAL", "1") .with_stderr_contains("[..]C incremental=[..]") .run(); p.cargo("build --release -v") .env_remove("CARGO_INCREMENTAL") .with_stderr_contains("[..]C incremental=[..]") .run(); p.cargo("build --release -v") .env("CARGO_INCREMENTAL", "0") .with_stderr_does_not_contain("[..]C incremental=[..]") .run(); } #[cargo_test] fn incremental_config() { let p = project() .file("src/main.rs", "fn main() {}") .file( ".cargo/config", r#" [build] incremental = false "#, ) .build(); p.cargo("build -v") .env_remove("CARGO_INCREMENTAL") .with_stderr_does_not_contain("[..]C incremental=[..]") .run(); p.cargo("build -v") .env("CARGO_INCREMENTAL", "1") .with_stderr_contains("[..]C incremental=[..]") .run(); } #[cargo_test] fn cargo_compile_with_workspace_excluded() { let p = project().file("src/main.rs", "fn main() {}").build(); p.cargo("build --workspace --exclude foo") .with_stderr_does_not_contain("[..]virtual[..]") .with_stderr_contains("[..]no packages to compile") .with_status(101) .run(); } #[cargo_test] fn cargo_compile_manifest_path() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("build --manifest-path foo/Cargo.toml") .cwd(p.root().parent().unwrap()) .run(); assert!(p.bin("foo").is_file()); } 
#[cargo_test] fn cargo_compile_with_invalid_manifest() { let p = project().file("Cargo.toml", "").build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: virtual manifests must be configured with [workspace] ", ) .run(); } #[cargo_test] fn cargo_compile_with_invalid_manifest2() { let p = project() .file( "Cargo.toml", " [project] foo = bar ", ) .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: could not parse input as TOML Caused by: TOML parse error at line 3, column 23 | 3 | foo = bar | ^ Unexpected `b` Expected quoted string ", ) .run(); } #[cargo_test] fn cargo_compile_with_invalid_manifest3() { let p = project().file("src/Cargo.toml", "a = bar").build(); p.cargo("build --manifest-path src/Cargo.toml") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: could not parse input as TOML Caused by: TOML parse error at line 1, column 5 | 1 | a = bar | ^ Unexpected `b` Expected quoted string ", ) .run(); } #[cargo_test] fn cargo_compile_duplicate_build_targets() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [lib] name = "main" path = "src/main.rs" crate-type = ["dylib"] [dependencies] "#, ) .file("src/main.rs", "#![allow(warnings)] fn main() {}") .build(); p.cargo("build") .with_stderr( "\ warning: file found to be present in multiple build targets: [..]main.rs [COMPILING] foo v0.0.1 ([..]) [FINISHED] [..] ", ) .run(); } #[cargo_test] fn cargo_compile_with_invalid_version() { let p = project() .file("Cargo.toml", &basic_manifest("foo", "1.0")) .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: unexpected end of input while parsing minor version number for key `package.version` ", ) .run(); } #[cargo_test] fn cargo_compile_with_empty_package_name() { let p = project() .file("Cargo.toml", &basic_manifest("", "0.0.0")) .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: package name cannot be an empty string ", ) .run(); } #[cargo_test] fn cargo_compile_with_invalid_package_name() { let p = project() .file("Cargo.toml", &basic_manifest("foo::bar", "0.0.0")) .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: invalid character `:` in package name: `foo::bar`, [..] 
", ) .run(); } #[cargo_test] fn cargo_compile_with_invalid_bin_target_name() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.0" [[bin]] name = "" "#, ) .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: binary target names cannot be empty ", ) .run(); } #[cargo_test] fn cargo_compile_with_forbidden_bin_target_name() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.0" [[bin]] name = "build" "#, ) .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: the binary target name `build` is forbidden, it conflicts with with cargo's build directory names ", ) .run(); } #[cargo_test] fn cargo_compile_with_bin_and_crate_type() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.0" [[bin]] name = "the_foo_bin" path = "src/foo.rs" crate-type = ["cdylib", "rlib"] "#, ) .file("src/foo.rs", "fn main() {}") .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: the target `the_foo_bin` is a binary and can't have any crate-types set \ (currently \"cdylib, rlib\")", ) .run(); } #[cargo_test] fn cargo_compile_api_exposes_artifact_paths() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.0" [[bin]] name = "the_foo_bin" path = "src/bin.rs" [lib] name = "the_foo_lib" path = "src/foo.rs" crate-type = ["cdylib", "rlib"] "#, ) .file("src/foo.rs", "pub fn bar() {}") .file("src/bin.rs", "pub fn main() {}") .build(); let shell = Shell::from_write(Box::new(Vec::new())); let config = Config::new(shell, env::current_dir().unwrap(), paths::home()); let ws = Workspace::new(&p.root().join("Cargo.toml"), &config).unwrap(); let compile_options = CompileOptions::new(ws.config(), CompileMode::Build).unwrap(); let result = cargo::ops::compile(&ws, &compile_options).unwrap(); assert_eq!(1, result.binaries.len()); assert!(result.binaries[0].path.exists()); assert!(result.binaries[0] .path .to_str() .unwrap() .contains("the_foo_bin")); assert_eq!(1, result.cdylibs.len()); // The exact library path varies by platform, but should certainly exist at least assert!(result.cdylibs[0].path.exists()); assert!(result.cdylibs[0] .path .to_str() .unwrap() .contains("the_foo_lib")); } #[cargo_test] fn cargo_compile_with_bin_and_proc() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.0" [[bin]] name = "the_foo_bin" path = "src/foo.rs" proc-macro = true "#, ) .file("src/foo.rs", "fn main() {}") .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: the target `the_foo_bin` is a binary and can't have `proc-macro` set `true`", ) .run(); } #[cargo_test] fn cargo_compile_with_invalid_lib_target_name() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.0" [lib] name = "" "#, ) .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: library target names cannot be empty ", ) .run(); } #[cargo_test] fn cargo_compile_with_invalid_non_numeric_dep_version() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" [dependencies] crossbeam = "y" "#, ) .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at 
`[CWD]/Cargo.toml` Caused by: failed to parse the version requirement `y` for dependency `crossbeam` Caused by: unexpected character 'y' while parsing major version number ", ) .run(); } #[cargo_test] fn cargo_compile_without_manifest() { let p = project().no_manifest().build(); p.cargo("build") .with_status(101) .with_stderr("[ERROR] could not find `Cargo.toml` in `[..]` or any parent directory") .run(); } #[cargo_test] #[cfg(target_os = "linux")] fn cargo_compile_with_lowercase_cargo_toml() { let p = project() .no_manifest() .file("cargo.toml", &basic_manifest("foo", "0.1.0")) .file("src/lib.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("build") .with_status(101) .with_stderr( "[ERROR] could not find `Cargo.toml` in `[..]` or any parent directory, \ but found cargo.toml please try to rename it to Cargo.toml", ) .run(); } #[cargo_test] fn cargo_compile_with_invalid_code() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", "invalid rust code!") .build(); p.cargo("build") .with_status(101) .with_stderr_contains("[ERROR] could not compile `foo` due to previous error\n") .run(); assert!(p.root().join("Cargo.lock").is_file()); } #[cargo_test] fn cargo_compile_with_invalid_code_in_deps() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [dependencies.bar] path = "../bar" [dependencies.baz] path = "../baz" "#, ) .file("src/main.rs", "invalid rust code!") .build(); let _bar = project() .at("bar") .file("Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("src/lib.rs", "invalid rust code!") .build(); let _baz = project() .at("baz") .file("Cargo.toml", &basic_manifest("baz", "0.1.0")) .file("src/lib.rs", "invalid rust code!") .build(); p.cargo("build") .with_status(101) .with_stderr_contains("[..]invalid rust code[..]") .with_stderr_contains("[ERROR] could not compile [..]") .run(); } #[cargo_test] fn cargo_compile_with_warnings_in_the_root_package() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", "fn main() {} fn dead() {}") .build(); p.cargo("build") .with_stderr_contains("[..]function is never used: `dead`[..]") .run(); } #[cargo_test] fn cargo_compile_with_warnings_in_a_dep_package() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [dependencies.bar] path = "bar" [[bin]] name = "foo" "#, ) .file("src/foo.rs", &main_file(r#""{}", bar::gimme()"#, &["bar"])) .file("bar/Cargo.toml", &basic_lib_manifest("bar")) .file( "bar/src/bar.rs", r#" pub fn gimme() -> &'static str { "test passed" } fn dead() {} "#, ) .build(); p.cargo("build") .with_stderr_contains("[..]function is never used: `dead`[..]") .run(); assert!(p.bin("foo").is_file()); p.process(&p.bin("foo")).with_stdout("test passed\n").run(); } #[cargo_test] fn cargo_compile_with_nested_deps_inferred() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [dependencies.bar] path = 'bar' [[bin]] name = "foo" "#, ) .file("src/foo.rs", &main_file(r#""{}", bar::gimme()"#, &["bar"])) .file( "bar/Cargo.toml", r#" [project] name = "bar" version = "0.5.0" authors = ["[email protected]"] [dependencies.baz] path = "../baz" "#, ) .file( "bar/src/lib.rs", r#" extern crate baz; pub fn gimme() -> String { baz::gimme() } "#, ) .file("baz/Cargo.toml", &basic_manifest("baz", "0.5.0")) .file( "baz/src/lib.rs", r#" pub fn gimme() -> String { "test passed".to_string() } "#, ) .build(); 
p.cargo("build").run(); assert!(p.bin("foo").is_file()); assert!(!p.bin("libbar.rlib").is_file()); assert!(!p.bin("libbaz.rlib").is_file()); p.process(&p.bin("foo")).with_stdout("test passed\n").run(); } #[cargo_test] fn cargo_compile_with_nested_deps_correct_bin() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [dependencies.bar] path = "bar" [[bin]] name = "foo" "#, ) .file("src/main.rs", &main_file(r#""{}", bar::gimme()"#, &["bar"])) .file( "bar/Cargo.toml", r#" [project] name = "bar" version = "0.5.0" authors = ["[email protected]"] [dependencies.baz] path = "../baz" "#, ) .file( "bar/src/lib.rs", r#" extern crate baz; pub fn gimme() -> String { baz::gimme() } "#, ) .file("baz/Cargo.toml", &basic_manifest("baz", "0.5.0")) .file( "baz/src/lib.rs", r#" pub fn gimme() -> String { "test passed".to_string() } "#, ) .build(); p.cargo("build").run(); assert!(p.bin("foo").is_file()); assert!(!p.bin("libbar.rlib").is_file()); assert!(!p.bin("libbaz.rlib").is_file()); p.process(&p.bin("foo")).with_stdout("test passed\n").run(); } #[cargo_test] fn cargo_compile_with_nested_deps_shorthand() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [dependencies.bar] path = "bar" "#, ) .file("src/main.rs", &main_file(r#""{}", bar::gimme()"#, &["bar"])) .file( "bar/Cargo.toml", r#" [project] name = "bar" version = "0.5.0" authors = ["[email protected]"] [dependencies.baz] path = "../baz" [lib] name = "bar" "#, ) .file( "bar/src/bar.rs", r#" extern crate baz; pub fn gimme() -> String { baz::gimme() } "#, ) .file("baz/Cargo.toml", &basic_lib_manifest("baz")) .file( "baz/src/baz.rs", r#" pub fn gimme() -> String { "test passed".to_string() } "#, ) .build(); p.cargo("build").run(); assert!(p.bin("foo").is_file()); assert!(!p.bin("libbar.rlib").is_file()); assert!(!p.bin("libbaz.rlib").is_file()); p.process(&p.bin("foo")).with_stdout("test passed\n").run(); } #[cargo_test] fn cargo_compile_with_nested_deps_longhand() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [dependencies.bar] path = "bar" version = "0.5.0" [[bin]] name = "foo" "#, ) .file("src/foo.rs", &main_file(r#""{}", bar::gimme()"#, &["bar"])) .file( "bar/Cargo.toml", r#" [project] name = "bar" version = "0.5.0" authors = ["[email protected]"] [dependencies.baz] path = "../baz" version = "0.5.0" [lib] name = "bar" "#, ) .file( "bar/src/bar.rs", r#" extern crate baz; pub fn gimme() -> String { baz::gimme() } "#, ) .file("baz/Cargo.toml", &basic_lib_manifest("baz")) .file( "baz/src/baz.rs", r#" pub fn gimme() -> String { "test passed".to_string() } "#, ) .build(); p.cargo("build").run(); assert!(p.bin("foo").is_file()); assert!(!p.bin("libbar.rlib").is_file()); assert!(!p.bin("libbaz.rlib").is_file()); p.process(&p.bin("foo")).with_stdout("test passed\n").run(); } // Check that Cargo gives a sensible error if a dependency can't be found // because of a name mismatch. 
#[cargo_test] fn cargo_compile_with_dep_name_mismatch() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = ["[email protected]"] [[bin]] name = "foo" [dependencies.notquitebar] path = "bar" "#, ) .file("src/bin/foo.rs", &main_file(r#""i am foo""#, &["bar"])) .file("bar/Cargo.toml", &basic_bin_manifest("bar")) .file("bar/src/bar.rs", &main_file(r#""i am bar""#, &[])) .build(); p.cargo("build") .with_status(101) .with_stderr( "\ error: no matching package named `notquitebar` found location searched: [CWD]/bar required by package `foo v0.0.1 ([CWD])` ", ) .run(); } // Ensure that renamed deps have a valid name #[cargo_test] fn cargo_compile_with_invalid_dep_rename() { let p = project() .file( "Cargo.toml", r#" [package] name = "buggin" version = "0.1.0" [dependencies] "haha this isn't a valid name 🐛" = { package = "libc", version = "0.1" } "#, ) .file("src/main.rs", &main_file(r#""What's good?""#, &[])) .build(); p.cargo("build") .with_status(101) .with_stderr( "\ error: failed to parse manifest at `[..]` Caused by: invalid character ` ` in dependency name: `haha this isn't a valid name 🐛`, characters must be Unicode XID characters (numbers, `-`, `_`, or most letters) ", ) .run(); } #[cargo_test] fn cargo_compile_with_filename() { let p = project() .file("src/lib.rs", "") .file( "src/bin/a.rs", r#" extern crate foo; fn main() { println!("hello a.rs"); } "#, ) .file("examples/a.rs", r#"fn main() { println!("example"); }"#) .build(); p.cargo("build --bin bin.rs") .with_status(101) .with_stderr("[ERROR] no bin target named `bin.rs`") .run(); p.cargo("build --bin a.rs") .with_status(101) .with_stderr( "\ [ERROR] no bin target named `a.rs` <tab>Did you mean `a`?", ) .run(); p.cargo("build --example example.rs") .with_status(101) .with_stderr("[ERROR] no example target named `example.rs`") .run(); p.cargo("build --example a.rs") .with_status(101) .with_stderr( "\ [ERROR] no example target named `a.rs` <tab>Did you mean `a`?", ) .run(); } #[cargo_test] fn incompatible_dependencies() { Package::new("bad", "0.1.0").publish(); Package::new("bad", "1.0.0").publish(); Package::new("bad", "1.0.1").publish(); Package::new("bad", "1.0.2").publish(); Package::new("bar", "0.1.0").dep("bad", "0.1.0").publish(); Package::new("baz", "0.1.1").dep("bad", "=1.0.0").publish(); Package::new("baz", "0.1.0").dep("bad", "=1.0.0").publish(); Package::new("qux", "0.1.2").dep("bad", ">=1.0.1").publish(); Package::new("qux", "0.1.1").dep("bad", ">=1.0.1").publish(); Package::new("qux", "0.1.0").dep("bad", ">=1.0.1").publish(); let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.0.1" [dependencies] bar = "0.1.0" baz = "0.1.0" qux = "0.1.0" "#, ) .file("src/main.rs", "fn main(){}") .build(); p.cargo("build") .with_status(101) .with_stderr_contains( "\ error: failed to select a version for `bad`. ... required by package `qux v0.1.0` ... which satisfies dependency `qux = \"^0.1.0\"` of package `foo v0.0.1 ([..])` versions that meet the requirements `>=1.0.1` are: 1.0.2, 1.0.1 all possible versions conflict with previously selected packages. previously selected package `bad v1.0.0` ... which satisfies dependency `bad = \"=1.0.0\"` of package `baz v0.1.0` ... 
which satisfies dependency `baz = \"^0.1.0\"` of package `foo v0.0.1 ([..])` failed to select a version for `bad` which could resolve this conflict", ) .run(); } #[cargo_test] fn incompatible_dependencies_with_multi_semver() { Package::new("bad", "1.0.0").publish(); Package::new("bad", "1.0.1").publish(); Package::new("bad", "2.0.0").publish(); Package::new("bad", "2.0.1").publish(); Package::new("bar", "0.1.0").dep("bad", "=1.0.0").publish(); Package::new("baz", "0.1.0").dep("bad", ">=2.0.1").publish(); let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.0.1" [dependencies] bar = "0.1.0" baz = "0.1.0" bad = ">=1.0.1, <=2.0.0" "#, ) .file("src/main.rs", "fn main(){}") .build(); p.cargo("build") .with_status(101) .with_stderr_contains( "\ error: failed to select a version for `bad`. ... required by package `foo v0.0.1 ([..])` versions that meet the requirements `>=1.0.1, <=2.0.0` are: 2.0.0, 1.0.1 all possible versions conflict with previously selected packages. previously selected package `bad v2.0.1` ... which satisfies dependency `bad = \">=2.0.1\"` of package `baz v0.1.0` ... which satisfies dependency `baz = \"^0.1.0\"` of package `foo v0.0.1 ([..])` previously selected package `bad v1.0.0` ... which satisfies dependency `bad = \"=1.0.0\"` of package `bar v0.1.0` ... which satisfies dependency `bar = \"^0.1.0\"` of package `foo v0.0.1 ([..])` failed to select a version for `bad` which could resolve this conflict", ) .run(); } #[cargo_test] fn compile_path_dep_then_change_version() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [dependencies.bar] path = "bar" "#, ) .file("src/lib.rs", "") .file("bar/Cargo.toml", &basic_manifest("bar", "0.0.1")) .file("bar/src/lib.rs", "") .build(); p.cargo("build").run(); p.change_file("bar/Cargo.toml", &basic_manifest("bar", "0.0.2")); p.cargo("build").run(); } #[cargo_test] fn ignores_carriage_return_in_lockfile() { let p = project() .file("src/main.rs", "mod a; fn main() {}") .file("src/a.rs", "") .build(); p.cargo("build").run(); let lock = p.read_lockfile(); p.change_file("Cargo.lock", &lock.replace("\n", "\r\n")); p.cargo("build").run(); } #[cargo_test] fn cargo_default_env_metadata_env_var() { // Ensure that path dep + dylib + env_var get metadata // (even though path_dep + dylib should not) let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [dependencies.bar] path = "bar" "#, ) .file("src/lib.rs", "// hi") .file( "bar/Cargo.toml", r#" [package] name = "bar" version = "0.0.1" authors = [] [lib] name = "bar" crate_type = ["dylib"] "#, ) .file("bar/src/lib.rs", "// hello") .build(); // No metadata on libbar since it's a dylib path dependency p.cargo("build -v") .with_stderr(&format!( "\ [COMPILING] bar v0.0.1 ([CWD]/bar) [RUNNING] `rustc --crate-name bar bar/src/lib.rs [..]--crate-type dylib \ --emit=[..]link \ -C prefer-dynamic[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/debug/deps` [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ -C extra-filename=[..] \ --out-dir [..] 
\ -L dependency=[CWD]/target/debug/deps \ --extern bar=[CWD]/target/debug/deps/{prefix}bar{suffix}` [FINISHED] dev [unoptimized + debuginfo] target(s) in [..]", prefix = env::consts::DLL_PREFIX, suffix = env::consts::DLL_SUFFIX, )) .run(); p.cargo("clean").run(); // If you set the env-var, then we expect metadata on libbar p.cargo("build -v") .env("__CARGO_DEFAULT_LIB_METADATA", "stable") .with_stderr(&format!( "\ [COMPILING] bar v0.0.1 ([CWD]/bar) [RUNNING] `rustc --crate-name bar bar/src/lib.rs [..]--crate-type dylib \ --emit=[..]link \ -C prefer-dynamic[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/debug/deps` [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ -C extra-filename=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/debug/deps \ --extern bar=[CWD]/target/debug/deps/{prefix}bar-[..]{suffix}` [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", prefix = env::consts::DLL_PREFIX, suffix = env::consts::DLL_SUFFIX, )) .run(); } #[cargo_test] fn crate_env_vars() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.1-alpha.1" description = "This is foo" homepage = "https://example.com" repository = "https://example.com/repo.git" authors = ["[email protected]"] license = "MIT OR Apache-2.0" license-file = "license.txt" [[bin]] name = "foo-bar" path = "src/main.rs" "#, ) .file( "src/main.rs", r#" extern crate foo; static VERSION_MAJOR: &'static str = env!("CARGO_PKG_VERSION_MAJOR"); static VERSION_MINOR: &'static str = env!("CARGO_PKG_VERSION_MINOR"); static VERSION_PATCH: &'static str = env!("CARGO_PKG_VERSION_PATCH"); static VERSION_PRE: &'static str = env!("CARGO_PKG_VERSION_PRE"); static VERSION: &'static str = env!("CARGO_PKG_VERSION"); static CARGO_MANIFEST_DIR: &'static str = env!("CARGO_MANIFEST_DIR"); static PKG_NAME: &'static str = env!("CARGO_PKG_NAME"); static HOMEPAGE: &'static str = env!("CARGO_PKG_HOMEPAGE"); static REPOSITORY: &'static str = env!("CARGO_PKG_REPOSITORY"); static LICENSE: &'static str = env!("CARGO_PKG_LICENSE"); static LICENSE_FILE: &'static str = env!("CARGO_PKG_LICENSE_FILE"); static DESCRIPTION: &'static str = env!("CARGO_PKG_DESCRIPTION"); static BIN_NAME: &'static str = env!("CARGO_BIN_NAME"); static CRATE_NAME: &'static str = env!("CARGO_CRATE_NAME"); fn main() { let s = format!("{}-{}-{} @ {} in {}", VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, VERSION_PRE, CARGO_MANIFEST_DIR); assert_eq!(s, foo::version()); println!("{}", s); assert_eq!("foo", PKG_NAME); assert_eq!("foo-bar", BIN_NAME); assert_eq!("foo_bar", CRATE_NAME); assert_eq!("https://example.com", HOMEPAGE); assert_eq!("https://example.com/repo.git", REPOSITORY); assert_eq!("MIT OR Apache-2.0", LICENSE); assert_eq!("license.txt", LICENSE_FILE); assert_eq!("This is foo", DESCRIPTION); let s = format!("{}.{}.{}-{}", VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, VERSION_PRE); assert_eq!(s, VERSION); // Verify CARGO_TARGET_TMPDIR isn't set for bins assert!(option_env!("CARGO_TARGET_TMPDIR").is_none()); } "#, ) .file( "src/lib.rs", r#" use std::env; use std::path::PathBuf; pub fn version() -> String { format!("{}-{}-{} @ {} in {}", env!("CARGO_PKG_VERSION_MAJOR"), env!("CARGO_PKG_VERSION_MINOR"), env!("CARGO_PKG_VERSION_PATCH"), env!("CARGO_PKG_VERSION_PRE"), env!("CARGO_MANIFEST_DIR")) } pub fn check_no_int_test_env() { env::var("CARGO_TARGET_DIR").unwrap_err(); } pub fn check_tmpdir(tmp: Option<&'static str>) { let 
tmpdir: PathBuf = tmp.unwrap().into(); let exe: PathBuf = env::current_exe().unwrap().into(); let mut expected: PathBuf = exe.parent().unwrap() .parent().unwrap() .parent().unwrap() .into(); expected.push("tmp"); assert_eq!(tmpdir, expected); // Check that CARGO_TARGET_TMPDIR isn't set for lib code assert!(option_env!("CARGO_TARGET_TMPDIR").is_none()); env::var("CARGO_TARGET_TMPDIR").unwrap_err(); } #[test] fn env() { // Check that CARGO_TARGET_TMPDIR isn't set for unit tests assert!(option_env!("CARGO_TARGET_TMPDIR").is_none()); env::var("CARGO_TARGET_TMPDIR").unwrap_err(); } "#, ) .file( "tests/env.rs", r#" #[test] fn env() { foo::check_tmpdir(option_env!("CARGO_TARGET_TMPDIR")); } "#, ); let p = if is_nightly() { p.file( "benches/env.rs", r#" #![feature(test)] extern crate test; use test::Bencher; #[bench] fn env(_: &mut Bencher) { foo::check_tmpdir(option_env!("CARGO_TARGET_TMPDIR")); } "#, ) .build() } else { p.build() }; println!("build"); p.cargo("build -v").run(); println!("bin"); p.process(&p.bin("foo-bar")) .with_stdout("0-5-1 @ alpha.1 in [CWD]") .run(); println!("test"); p.cargo("test -v").run(); if is_nightly() { println!("bench"); p.cargo("bench -v").run(); } } #[cargo_test] fn crate_authors_env_vars() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.1-alpha.1" authors = ["[email protected]", "[email protected]"] "#, ) .file( "src/main.rs", r#" extern crate foo; static AUTHORS: &'static str = env!("CARGO_PKG_AUTHORS"); fn main() { let s = "[email protected]:[email protected]"; assert_eq!(AUTHORS, foo::authors()); println!("{}", AUTHORS); assert_eq!(s, AUTHORS); } "#, ) .file( "src/lib.rs", r#" pub fn authors() -> String { format!("{}", env!("CARGO_PKG_AUTHORS")) } "#, ) .build(); println!("build"); p.cargo("build -v").run(); println!("bin"); p.process(&p.bin("foo")) .with_stdout("[email protected]:[email protected]") .run(); println!("test"); p.cargo("test -v").run(); } #[cargo_test] fn vv_prints_rustc_env_vars() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.0.1" authors = ["escape='\"@example.com"] "#, ) .file("src/main.rs", "fn main() {}") .build(); let mut b = p.cargo("build -vv"); if cfg!(windows) { b.with_stderr_contains( "[RUNNING] `[..]set CARGO_PKG_NAME=foo&& [..]rustc [..]`" ).with_stderr_contains( r#"[RUNNING] `[..]set CARGO_PKG_AUTHORS="escape='\"@example.com"&& [..]rustc [..]`"# ) } else { b.with_stderr_contains("[RUNNING] `[..]CARGO_PKG_NAME=foo [..]rustc [..]`") .with_stderr_contains( r#"[RUNNING] `[..]CARGO_PKG_AUTHORS='escape='\''"@example.com' [..]rustc [..]`"#, ) }; b.run(); } // The tester may already have LD_LIBRARY_PATH=::/foo/bar which leads to a false positive error fn setenv_for_removing_empty_component(mut execs: Execs) -> Execs { let v = dylib_path_envvar(); if let Ok(search_path) = env::var(v) { let new_search_path = env::join_paths(env::split_paths(&search_path).filter(|e| !e.as_os_str().is_empty())) .expect("join_paths"); execs.env(v, new_search_path); // build_command() will override LD_LIBRARY_PATH accordingly } execs } // Regression test for #4277 #[cargo_test] fn crate_library_path_env_var() { let p = project() .file( "src/main.rs", &format!( r#" fn main() {{ let search_path = env!("{}"); let paths = std::env::split_paths(&search_path).collect::<Vec<_>>(); assert!(!paths.contains(&"".into())); }} "#, dylib_path_envvar() ), ) .build(); setenv_for_removing_empty_component(p.cargo("run")).run(); } // Regression test for #4277 #[cargo_test] fn build_with_fake_libc_not_loading() { 
let p = project() .file("src/main.rs", "fn main() {}") .file("src/lib.rs", r#" "#) .file("libc.so.6", r#""#) .build(); setenv_for_removing_empty_component(p.cargo("build")).run(); } // this is testing that src/<pkg-name>.rs still works (for now) #[cargo_test] fn many_crate_types_old_style_lib_location() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [lib] name = "foo" crate_type = ["rlib", "dylib"] "#, ) .file("src/foo.rs", "pub fn foo() {}") .build(); p.cargo("build") .with_stderr_contains( "\ [WARNING] path `[..]src/foo.rs` was erroneously implicitly accepted for library `foo`, please rename the file to `src/lib.rs` or set lib.path in Cargo.toml", ) .run(); assert!(p.root().join("target/debug/libfoo.rlib").is_file()); let fname = format!("{}foo{}", env::consts::DLL_PREFIX, env::consts::DLL_SUFFIX); assert!(p.root().join("target/debug").join(&fname).is_file()); } #[cargo_test] fn many_crate_types_correct() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [lib] name = "foo" crate_type = ["rlib", "dylib"] "#, ) .file("src/lib.rs", "pub fn foo() {}") .build(); p.cargo("build").run(); assert!(p.root().join("target/debug/libfoo.rlib").is_file()); let fname = format!("{}foo{}", env::consts::DLL_PREFIX, env::consts::DLL_SUFFIX); assert!(p.root().join("target/debug").join(&fname).is_file()); } #[cargo_test] fn set_both_dylib_and_cdylib_crate_types() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [lib] name = "foo" crate_type = ["cdylib", "dylib"] "#, ) .file("src/lib.rs", "pub fn foo() {}") .build(); p.cargo("build") .with_status(101) .with_stderr( "\ error: failed to parse manifest at `[..]` Caused by: library `foo` cannot set the crate type of both `dylib` and `cdylib` ", ) .run(); } #[cargo_test] fn dev_dependencies_conflicting_warning() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" edition = "2018" [dev-dependencies] a = {path = "a"} [dev_dependencies] a = {path = "a"} "#, ) .file("src/lib.rs", "") .file( "a/Cargo.toml", r#" [package] name = "a" version = "0.0.1" "#, ) .file("a/src/lib.rs", "") .build(); p.cargo("build") .with_stderr_contains( "[WARNING] conflicting between `dev-dependencies` and `dev_dependencies` in the `foo` package.\n `dev_dependencies` is ignored and not recommended for use in the future" ) .run(); } #[cargo_test] fn build_dependencies_conflicting_warning() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" edition = "2018" [build-dependencies] a = {path = "a"} [build_dependencies] a = {path = "a"} "#, ) .file("src/lib.rs", "") .file( "a/Cargo.toml", r#" [package] name = "a" version = "0.0.1" "#, ) .file("a/src/lib.rs", "") .build(); p.cargo("build") .with_stderr_contains( "[WARNING] conflicting between `build-dependencies` and `build_dependencies` in the `foo` package.\n `build_dependencies` is ignored and not recommended for use in the future" ) .run(); } #[cargo_test] fn lib_crate_types_conflicting_warning() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [lib] name = "foo" crate-type = ["rlib", "dylib"] crate_type = ["staticlib", "dylib"] "#, ) .file("src/lib.rs", "pub fn foo() {}") .build(); p.cargo("build") .with_stderr_contains( "[WARNING] conflicting between `crate-type` and `crate_type` in the `foo` library 
target.\n `crate_type` is ignored and not recommended for use in the future", ) .run(); } #[cargo_test] fn examples_crate_types_conflicting_warning() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [[example]] name = "ex" path = "examples/ex.rs" crate-type = ["rlib", "dylib"] crate_type = ["proc_macro"] [[example]] name = "goodbye" path = "examples/ex-goodbye.rs" crate-type = ["rlib", "dylib"] crate_type = ["rlib", "staticlib"] "#, ) .file("src/lib.rs", "") .file( "examples/ex.rs", r#" fn main() { println!("ex"); } "#, ) .file( "examples/ex-goodbye.rs", r#" fn main() { println!("goodbye"); } "#, ) .build(); p.cargo("build") .with_stderr_contains( "\ [WARNING] conflicting between `crate-type` and `crate_type` in the `ex` example target.\n `crate_type` is ignored and not recommended for use in the future [WARNING] conflicting between `crate-type` and `crate_type` in the `goodbye` example target.\n `crate_type` is ignored and not recommended for use in the future", ) .run(); } #[cargo_test] fn self_dependency() { let p = project() .file( "Cargo.toml", r#" [package] name = "test" version = "0.0.0" authors = [] [dependencies.test] path = "." [lib] name = "test" path = "src/test.rs" "#, ) .file("src/test.rs", "fn main() {}") .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] cyclic package dependency: package `test v0.0.0 ([CWD])` depends on itself. Cycle: package `test v0.0.0 ([CWD])` ... which satisfies path dependency `test` of package `test v0.0.0 ([..])`", ) .run(); } #[cargo_test] /// Make sure broken and loop symlinks don't break the build /// /// This test requires you to be able to make symlinks. /// For windows, this may require you to enable developer mode. fn ignore_broken_symlinks() { if !symlink_supported() { return; } let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .symlink("Notafile", "bar") // To hit the symlink directory, we need a build script // to trigger a full scan of package files. .file("build.rs", &main_file(r#""build script""#, &[])) .symlink_dir("a/b", "a/b/c/d/foo") .build(); p.cargo("build") .with_stderr_contains( "[WARNING] File system loop found: [..]/a/b/c/d/foo points to an ancestor [..]/a/b", ) .run(); assert!(p.bin("foo").is_file()); p.process(&p.bin("foo")).with_stdout("i am foo\n").run(); } #[cargo_test] fn missing_lib_and_bin() { let p = project().build(); p.cargo("build") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]Cargo.toml` Caused by: no targets specified in the manifest either src/lib.rs, src/main.rs, a [lib] section, or [[bin]] section must be present\n", ) .run(); } #[cargo_test] fn lto_build() { let p = project() .file( "Cargo.toml", r#" [package] name = "test" version = "0.0.0" authors = [] [profile.release] lto = true "#, ) .file("src/main.rs", "fn main() {}") .build(); p.cargo("build -v --release") .with_stderr( "\ [COMPILING] test v0.0.0 ([CWD]) [RUNNING] `rustc --crate-name test src/main.rs [..]--crate-type bin \ --emit=[..]link \ -C opt-level=3 \ -C lto \ [..] [FINISHED] release [optimized] target(s) in [..] ", ) .run(); } #[cargo_test] fn verbose_build() { let p = project().file("src/lib.rs", "").build(); p.cargo("build -v") .with_stderr( "\ [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] 
\ -L dependency=[CWD]/target/debug/deps` [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); } #[cargo_test] fn verbose_release_build() { let p = project().file("src/lib.rs", "").build(); p.cargo("build -v --release") .with_stderr( "\ [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ --emit=[..]link[..]\ -C opt-level=3[..]\ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/release/deps` [FINISHED] release [optimized] target(s) in [..] ", ) .run(); } #[cargo_test] fn verbose_release_build_short() { let p = project().file("src/lib.rs", "").build(); p.cargo("build -v -r") .with_stderr( "\ [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ --emit=[..]link[..]\ -C opt-level=3[..]\ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/release/deps` [FINISHED] release [optimized] target(s) in [..] ", ) .run(); } #[cargo_test] fn verbose_release_build_deps() { let p = project() .file( "Cargo.toml", r#" [package] name = "test" version = "0.0.0" authors = [] [dependencies.foo] path = "foo" "#, ) .file("src/lib.rs", "") .file( "foo/Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [lib] name = "foo" crate_type = ["dylib", "rlib"] "#, ) .file("foo/src/lib.rs", "") .build(); p.cargo("build -v --release") .with_stderr(&format!( "\ [COMPILING] foo v0.0.0 ([CWD]/foo) [RUNNING] `rustc --crate-name foo foo/src/lib.rs [..]\ --crate-type dylib --crate-type rlib \ --emit=[..]link \ -C prefer-dynamic[..]\ -C opt-level=3[..]\ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/release/deps` [COMPILING] test v0.0.0 ([CWD]) [RUNNING] `rustc --crate-name test src/lib.rs [..]--crate-type lib \ --emit=[..]link[..]\ -C opt-level=3[..]\ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/release/deps \ --extern foo=[CWD]/target/release/deps/{prefix}foo{suffix} \ --extern foo=[CWD]/target/release/deps/libfoo.rlib` [FINISHED] release [optimized] target(s) in [..] ", prefix = env::consts::DLL_PREFIX, suffix = env::consts::DLL_SUFFIX )) .run(); } #[cargo_test] fn explicit_examples() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "1.0.0" authors = [] [lib] name = "foo" path = "src/lib.rs" [[example]] name = "hello" path = "examples/ex-hello.rs" [[example]] name = "goodbye" path = "examples/ex-goodbye.rs" "#, ) .file( "src/lib.rs", r#" pub fn get_hello() -> &'static str { "Hello" } pub fn get_goodbye() -> &'static str { "Goodbye" } pub fn get_world() -> &'static str { "World" } "#, ) .file( "examples/ex-hello.rs", r#" extern crate foo; fn main() { println!("{}, {}!", foo::get_hello(), foo::get_world()); } "#, ) .file( "examples/ex-goodbye.rs", r#" extern crate foo; fn main() { println!("{}, {}!", foo::get_goodbye(), foo::get_world()); } "#, ) .build(); p.cargo("build --examples").run(); p.process(&p.bin("examples/hello")) .with_stdout("Hello, World!\n") .run(); p.process(&p.bin("examples/goodbye")) .with_stdout("Goodbye, World!\n") .run(); } #[cargo_test] fn non_existing_test() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "1.0.0" [lib] name = "foo" path = "src/lib.rs" [[test]] name = "hello" "#, ) .file("src/lib.rs", "") .build(); p.cargo("build --tests -v") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: can't find `hello` test at `tests/hello.rs` or `tests/hello/main.rs`. 
\ Please specify test.path if you want to use a non-default path.", ) .run(); } #[cargo_test] fn non_existing_example() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "1.0.0" [lib] name = "foo" path = "src/lib.rs" [[example]] name = "hello" "#, ) .file("src/lib.rs", "") .build(); p.cargo("build --examples -v") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: can't find `hello` example at `examples/hello.rs` or `examples/hello/main.rs`. \ Please specify example.path if you want to use a non-default path.", ) .run(); } #[cargo_test] fn non_existing_benchmark() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "1.0.0" [lib] name = "foo" path = "src/lib.rs" [[bench]] name = "hello" "#, ) .file("src/lib.rs", "") .build(); p.cargo("build --benches -v") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: can't find `hello` bench at `benches/hello.rs` or `benches/hello/main.rs`. \ Please specify bench.path if you want to use a non-default path.", ) .run(); } #[cargo_test] fn non_existing_binary() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/lib.rs", "") .file("src/bin/ehlo.rs", "") .build(); p.cargo("build -v") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: can't find `foo` bin at `src/bin/foo.rs` or `src/bin/foo/main.rs`. \ Please specify bin.path if you want to use a non-default path.", ) .run(); } #[cargo_test] fn commonly_wrong_path_of_test() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "1.0.0" [lib] name = "foo" path = "src/lib.rs" [[test]] name = "foo" "#, ) .file("src/lib.rs", "") .file("test/foo.rs", "") .build(); p.cargo("build --tests -v") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: can't find `foo` test at default paths, but found a file at `test/foo.rs`. Perhaps rename the file to `tests/foo.rs` for target auto-discovery, \ or specify test.path if you want to use a non-default path.", ) .run(); } #[cargo_test] fn commonly_wrong_path_of_example() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "1.0.0" [lib] name = "foo" path = "src/lib.rs" [[example]] name = "foo" "#, ) .file("src/lib.rs", "") .file("example/foo.rs", "") .build(); p.cargo("build --examples -v") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: can't find `foo` example at default paths, but found a file at `example/foo.rs`. Perhaps rename the file to `examples/foo.rs` for target auto-discovery, \ or specify example.path if you want to use a non-default path.", ) .run(); } #[cargo_test] fn commonly_wrong_path_of_benchmark() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "1.0.0" [lib] name = "foo" path = "src/lib.rs" [[bench]] name = "foo" "#, ) .file("src/lib.rs", "") .file("bench/foo.rs", "") .build(); p.cargo("build --benches -v") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: can't find `foo` bench at default paths, but found a file at `bench/foo.rs`. 
Perhaps rename the file to `benches/foo.rs` for target auto-discovery, \ or specify bench.path if you want to use a non-default path.", ) .run(); } #[cargo_test] fn commonly_wrong_path_binary() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/lib.rs", "") .file("src/bins/foo.rs", "") .build(); p.cargo("build -v") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: can't find `foo` bin at default paths, but found a file at `src/bins/foo.rs`. Perhaps rename the file to `src/bin/foo.rs` for target auto-discovery, \ or specify bin.path if you want to use a non-default path.", ) .run(); } #[cargo_test] fn commonly_wrong_path_subdir_binary() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/lib.rs", "") .file("src/bins/foo/main.rs", "") .build(); p.cargo("build -v") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: can't find `foo` bin at default paths, but found a file at `src/bins/foo/main.rs`. Perhaps rename the file to `src/bin/foo/main.rs` for target auto-discovery, \ or specify bin.path if you want to use a non-default path.", ) .run(); } #[cargo_test] fn found_multiple_target_files() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/lib.rs", "") .file("src/bin/foo.rs", "") .file("src/bin/foo/main.rs", "") .build(); p.cargo("build -v") .with_status(101) // Don't assert the inferred pathes since the order is non-deterministic. .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: cannot infer path for `foo` bin Cargo doesn't know which to use because multiple target files found \ at `src/bin/foo[..].rs` and `src/bin/foo[..].rs`.", ) .run(); } #[cargo_test] fn legacy_binary_paths_warnings() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "1.0.0" authors = [] [[bin]] name = "bar" "#, ) .file("src/lib.rs", "") .file("src/main.rs", "fn main() {}") .build(); p.cargo("build -v") .with_stderr_contains( "\ [WARNING] path `[..]src/main.rs` was erroneously implicitly accepted for binary `bar`, please set bin.path in Cargo.toml", ) .run(); let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "1.0.0" authors = [] [[bin]] name = "bar" "#, ) .file("src/lib.rs", "") .file("src/bin/main.rs", "fn main() {}") .build(); p.cargo("build -v") .with_stderr_contains( "\ [WARNING] path `[..]src/bin/main.rs` was erroneously implicitly accepted for binary `bar`, please set bin.path in Cargo.toml", ) .run(); let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "1.0.0" authors = [] [[bin]] name = "bar" "#, ) .file("src/bar.rs", "fn main() {}") .build(); p.cargo("build -v") .with_stderr_contains( "\ [WARNING] path `[..]src/bar.rs` was erroneously implicitly accepted for binary `bar`, please set bin.path in Cargo.toml", ) .run(); } #[cargo_test] fn implic
let p = project() .file( "src/lib.rs", r#" pub fn get_hello() -> &'static str { "Hello" } pub fn get_goodbye() -> &'static str { "Goodbye" } pub fn get_world() -> &'static str { "World" } "#, ) .file( "examples/hello.rs", r#" extern crate foo; fn main() { println!("{}, {}!", foo::get_hello(), foo::get_world()); } "#, ) .file( "examples/goodbye.rs", r#" extern crate foo; fn main() { println!("{}, {}!", foo::get_goodbye(), foo::get_world()); } "#, ) .build(); p.cargo("build --examples").run(); p.process(&p.bin("examples/hello")) .with_stdout("Hello, World!\n") .run(); p.process(&p.bin("examples/goodbye")) .with_stdout("Goodbye, World!\n") .run(); } #[cargo_test] fn standard_build_no_ndebug() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file( "src/foo.rs", r#" fn main() { if cfg!(debug_assertions) { println!("slow") } else { println!("fast") } } "#, ) .build(); p.cargo("build").run(); p.process(&p.bin("foo")).with_stdout("slow\n").run(); } #[cargo_test] fn release_build_ndebug() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file( "src/foo.rs", r#" fn main() { if cfg!(debug_assertions) { println!("slow") } else { println!("fast") } } "#, ) .build(); p.cargo("build --release").run(); p.process(&p.release_bin("foo")).with_stdout("fast\n").run(); } #[cargo_test] fn inferred_main_bin() { let p = project().file("src/main.rs", "fn main() {}").build(); p.cargo("build").run(); p.process(&p.bin("foo")).run(); } #[cargo_test] fn deletion_causes_failure() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [dependencies.bar] path = "bar" "#, ) .file("src/main.rs", "extern crate bar; fn main() {}") .file("bar/Cargo.toml", &basic_manifest("bar", "0.0.1")) .file("bar/src/lib.rs", "") .build(); p.cargo("build").run(); p.change_file("Cargo.toml", &basic_manifest("foo", "0.0.1")); p.cargo("build") .with_status(101) .with_stderr_contains("[..]can't find crate for `bar`") .run(); } #[cargo_test] fn bad_cargo_toml_in_target_dir() { let p = project() .file("src/main.rs", "fn main() {}") .file("target/Cargo.toml", "bad-toml") .build(); p.cargo("build").run(); p.process(&p.bin("foo")).run(); } #[cargo_test] fn lib_with_standard_name() { let p = project() .file("Cargo.toml", &basic_manifest("syntax", "0.0.1")) .file("src/lib.rs", "pub fn foo() {}") .file( "src/main.rs", "extern crate syntax; fn main() { syntax::foo() }", ) .build(); p.cargo("build") .with_stderr( "\ [COMPILING] syntax v0.0.1 ([CWD]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); } #[cargo_test] fn simple_staticlib() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.1" [lib] name = "foo" crate-type = ["staticlib"] "#, ) .file("src/lib.rs", "pub fn foo() {}") .build(); // env var is a test for #1381 p.cargo("build").env("CARGO_LOG", "nekoneko=trace").run(); } #[cargo_test] fn staticlib_rlib_and_bin() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.1" [lib] name = "foo" crate-type = ["staticlib", "rlib"] "#, ) .file("src/lib.rs", "pub fn foo() {}") .file("src/main.rs", "extern crate foo; fn main() { foo::foo(); }") .build(); p.cargo("build -v").run(); } #[cargo_test] fn opt_out_of_bin() { let p = project() .file( "Cargo.toml", r#" bin = [] [package] name = "foo" authors = [] version = "0.0.1" "#, ) .file("src/lib.rs", "") .file("src/main.rs", "bad syntax") .build(); p.cargo("build").run(); } #[cargo_test] fn single_lib() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.1" [lib] name = "foo" path = "src/bar.rs" "#, ) .file("src/bar.rs", "") .build(); p.cargo("build").run(); } #[cargo_test] fn freshness_ignores_excluded() { let foo = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] build = "build.rs" exclude = ["src/b*.rs"] "#, ) .file("build.rs", "fn main() {}") .file("src/lib.rs", "pub fn bar() -> i32 { 1 }") .build(); foo.root().move_into_the_past(); foo.cargo("build") .with_stderr( "\ [COMPILING] foo v0.0.0 ([CWD]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); // Smoke test to make sure it doesn't compile again println!("first pass"); foo.cargo("build").with_stdout("").run(); // Modify an ignored file and make sure we don't rebuild println!("second pass"); foo.change_file("src/bar.rs", ""); foo.cargo("build").with_stdout("").run(); } #[cargo_test] fn rebuild_preserves_out_dir() { let foo = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] build = 'build.rs' "#, ) .file( "build.rs", r#" use std::env; use std::fs::File; use std::path::Path; fn main() { let path = Path::new(&env::var("OUT_DIR").unwrap()).join("foo"); if env::var_os("FIRST").is_some() { File::create(&path).unwrap(); } else { File::create(&path).unwrap(); } } "#, ) .file("src/lib.rs", "pub fn bar() -> i32 { 1 }") .build(); foo.root().move_into_the_past(); foo.cargo("build") .env("FIRST", "1") .with_stderr( "\ [COMPILING] foo v0.0.0 ([CWD]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); foo.change_file("src/bar.rs", ""); foo.cargo("build") .with_stderr( "\ [COMPILING] foo v0.0.0 ([CWD]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); } #[cargo_test] fn dep_no_libs() { let foo = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies.bar] path = "bar" "#, ) .file("src/lib.rs", "pub fn bar() -> i32 { 1 }") .file("bar/Cargo.toml", &basic_manifest("bar", "0.0.0")) .file("bar/src/main.rs", "") .build(); foo.cargo("build").run(); } #[cargo_test] fn recompile_space_in_name() { let foo = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [lib] name = "foo" path = "src/my lib.rs" "#, ) .file("src/my lib.rs", "") .build(); foo.cargo("build").run(); foo.root().move_into_the_past(); foo.cargo("build").with_stdout("").run(); } #[cfg(unix)] #[cargo_test] fn credentials_is_unreadable() { use cargo_test_support::paths::home; use std::os::unix::prelude::*; let p = project() .file("Cargo.toml", &basic_manifest("foo", "0.1.0")) .file("src/lib.rs", "") .build(); let credentials = home().join(".cargo/credentials"); t!(fs::create_dir_all(credentials.parent().unwrap())); t!(fs::write( &credentials, r#" [registry] token = "api-token" "# )); let stat = fs::metadata(credentials.as_path()).unwrap(); let mut perms = stat.permissions(); perms.set_mode(0o000); fs::set_permissions(credentials, perms).unwrap(); p.cargo("build").run(); } #[cfg(unix)] #[cargo_test] fn ignore_bad_directories() { use std::os::unix::prelude::*; let foo = project() .file("Cargo.toml", &basic_manifest("foo", "0.0.0")) .file("src/lib.rs", "") .build(); let dir = foo.root().join("tmp"); fs::create_dir(&dir).unwrap(); let stat = fs::metadata(&dir).unwrap(); let mut perms = stat.permissions(); perms.set_mode(0o644); fs::set_permissions(&dir, perms.clone()).unwrap(); foo.cargo("build").run(); perms.set_mode(0o755); fs::set_permissions(&dir, perms).unwrap(); } #[cargo_test] fn bad_cargo_config() { let foo = project() .file("Cargo.toml", &basic_manifest("foo", "0.0.0")) .file("src/lib.rs", "") .file(".cargo/config", "this is not valid toml") .build(); foo.cargo("build -v") .with_status(101) .with_stderr( "\ [ERROR] could not load Cargo configuration Caused by: could not parse TOML configuration in `[..]` Caused by: could not parse input as TOML Caused by: TOML parse error at line 1, column 6 | 1 | this is not valid toml | ^ Unexpected `i` Expected `.` or `=` ", ) .run(); } #[cargo_test] fn cargo_platform_specific_dependency() { let host = rustc_host(); let p = project() .file( "Cargo.toml", &format!( r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] build = "build.rs" [target.{host}.dependencies] dep = {{ path = "dep" }} [target.{host}.build-dependencies] build = {{ path = "build" }} [target.{host}.dev-dependencies] dev = {{ path = "dev" }} "#, host = host ), ) .file("src/main.rs", "extern crate dep; fn main() { dep::dep() }") .file( "tests/foo.rs", "extern crate dev; #[test] fn foo() { dev::dev() }", ) .file( "build.rs", "extern crate build; fn main() { build::build(); }", ) .file("dep/Cargo.toml", &basic_manifest("dep", "0.5.0")) .file("dep/src/lib.rs", "pub fn dep() {}") .file("build/Cargo.toml", &basic_manifest("build", "0.5.0")) .file("build/src/lib.rs", "pub fn build() {}") .file("dev/Cargo.toml", &basic_manifest("dev", "0.5.0")) .file("dev/src/lib.rs", "pub fn dev() {}") .build(); p.cargo("build").run(); assert!(p.bin("foo").is_file()); p.cargo("test").run(); } #[cargo_test] fn cargo_platform_specific_dependency_build_dependencies_conflicting_warning() { let host = rustc_host(); let p = project() .file( "Cargo.toml", &format!( r#" [project] name = 
"foo" version = "0.5.0" authors = ["[email protected]"] build = "build.rs" [target.{host}.build-dependencies] build = {{ path = "build" }} [target.{host}.build_dependencies] build = {{ path = "build" }} "#, host = host ), ) .file("src/main.rs", "fn main() { }") .file( "build.rs", "extern crate build; fn main() { build::build(); }", ) .file("build/Cargo.toml", &basic_manifest("build", "0.5.0")) .file("build/src/lib.rs", "pub fn build() {}") .build(); p.cargo("build") .with_stderr_contains( format!("[WARNING] conflicting between `build-dependencies` and `build_dependencies` in the `{}` platform target.\n `build_dependencies` is ignored and not recommended for use in the future", host) ) .run(); assert!(p.bin("foo").is_file()); } #[cargo_test] fn cargo_platform_specific_dependency_dev_dependencies_conflicting_warning() { let host = rustc_host(); let p = project() .file( "Cargo.toml", &format!( r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [target.{host}.dev-dependencies] dev = {{ path = "dev" }} [target.{host}.dev_dependencies] dev = {{ path = "dev" }} "#, host = host ), ) .file("src/main.rs", "fn main() { }") .file( "tests/foo.rs", "extern crate dev; #[test] fn foo() { dev::dev() }", ) .file("dev/Cargo.toml", &basic_manifest("dev", "0.5.0")) .file("dev/src/lib.rs", "pub fn dev() {}") .build(); p.cargo("build") .with_stderr_contains( format!("[WARNING] conflicting between `dev-dependencies` and `dev_dependencies` in the `{}` platform target.\n `dev_dependencies` is ignored and not recommended for use in the future", host) ) .run(); assert!(p.bin("foo").is_file()); p.cargo("test").run(); } #[cargo_test] fn bad_platform_specific_dependency() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [target.wrong-target.dependencies.bar] path = "bar" "#, ) .file("src/main.rs", &main_file(r#""{}", bar::gimme()"#, &["bar"])) .file("bar/Cargo.toml", &basic_manifest("bar", "0.5.0")) .file( "bar/src/lib.rs", r#"pub fn gimme() -> String { format!("") }"#, ) .build(); p.cargo("build") .with_status(101) .with_stderr_contains("[..]can't find crate for `bar`") .run(); } #[cargo_test] fn cargo_platform_specific_dependency_wrong_platform() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [target.non-existing-triplet.dependencies.bar] path = "bar" "#, ) .file("src/main.rs", "fn main() {}") .file("bar/Cargo.toml", &basic_manifest("bar", "0.5.0")) .file( "bar/src/lib.rs", "invalid rust file, should not be compiled", ) .build(); p.cargo("build").run(); assert!(p.bin("foo").is_file()); p.process(&p.bin("foo")).run(); let lockfile = p.read_lockfile(); assert!(lockfile.contains("bar")); } #[cargo_test] fn example_as_lib() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [[example]] name = "ex" crate-type = ["lib"] "#, ) .file("src/lib.rs", "") .file("examples/ex.rs", "") .build(); p.cargo("build --example=ex").run(); assert!(p.example_lib("ex", "lib").is_file()); } #[cargo_test] fn example_as_rlib() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [[example]] name = "ex" crate-type = ["rlib"] "#, ) .file("src/lib.rs", "") .file("examples/ex.rs", "") .build(); p.cargo("build --example=ex").run(); assert!(p.example_lib("ex", "rlib").is_file()); } #[cargo_test] fn example_as_dylib() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = 
"0.0.1" authors = [] [[example]] name = "ex" crate-type = ["dylib"] "#, ) .file("src/lib.rs", "") .file("examples/ex.rs", "") .build(); p.cargo("build --example=ex").run(); assert!(p.example_lib("ex", "dylib").is_file()); } #[cargo_test] fn example_as_proc_macro() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [[example]] name = "ex" crate-type = ["proc-macro"] "#, ) .file("src/lib.rs", "") .file( "examples/ex.rs", r#" extern crate proc_macro; use proc_macro::TokenStream; #[proc_macro] pub fn eat(_item: TokenStream) -> TokenStream { "".parse().unwrap() } "#, ) .build(); p.cargo("build --example=ex").run(); assert!(p.example_lib("ex", "proc-macro").is_file()); } #[cargo_test] fn example_bin_same_name() { let p = project() .file("src/main.rs", "fn main() {}") .file("examples/foo.rs", "fn main() {}") .build(); p.cargo("build --examples").run(); assert!(!p.bin("foo").is_file()); // We expect a file of the form bin/foo-{metadata_hash} assert!(p.bin("examples/foo").is_file()); p.cargo("build --examples").run(); assert!(!p.bin("foo").is_file()); // We expect a file of the form bin/foo-{metadata_hash} assert!(p.bin("examples/foo").is_file()); } #[cargo_test] fn compile_then_delete() { let p = project().file("src/main.rs", "fn main() {}").build(); p.cargo("run -v").run(); assert!(p.bin("foo").is_file()); if cfg!(windows) { // On windows unlinking immediately after running often fails, so sleep sleep_ms(100); } fs::remove_file(&p.bin("foo")).unwrap(); p.cargo("run -v").run(); } #[cargo_test] fn transitive_dependencies_not_available() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [dependencies.aaaaa] path = "a" "#, ) .file( "src/main.rs", "extern crate bbbbb; extern crate aaaaa; fn main() {}", ) .file( "a/Cargo.toml", r#" [package] name = "aaaaa" version = "0.0.1" authors = [] [dependencies.bbbbb] path = "../b" "#, ) .file("a/src/lib.rs", "extern crate bbbbb;") .file("b/Cargo.toml", &basic_manifest("bbbbb", "0.0.1")) .file("b/src/lib.rs", "") .build(); p.cargo("build -v") .with_status(101) .with_stderr_contains("[..] can't find crate for `bbbbb`[..]") .run(); } #[cargo_test] fn cyclic_deps_rejected() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [dependencies.a] path = "a" "#, ) .file("src/lib.rs", "") .file( "a/Cargo.toml", r#" [package] name = "a" version = "0.0.1" authors = [] [dependencies.foo] path = ".." "#, ) .file("a/src/lib.rs", "") .build(); p.cargo("build -v") .with_status(101) .with_stderr( "[ERROR] cyclic package dependency: package `a v0.0.1 ([CWD]/a)` depends on itself. Cycle: package `a v0.0.1 ([CWD]/a)` ... which satisfies path dependency `a` of package `foo v0.0.1 ([CWD])` ... 
which satisfies path dependency `foo` of package `a v0.0.1 ([..])`", ).run(); } #[cargo_test] fn predictable_filenames() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [lib] name = "foo" crate-type = ["dylib", "rlib"] "#, ) .file("src/lib.rs", "") .build(); p.cargo("build -v").run(); assert!(p.root().join("target/debug/libfoo.rlib").is_file()); let dylib_name = format!("{}foo{}", env::consts::DLL_PREFIX, env::consts::DLL_SUFFIX); assert!(p.root().join("target/debug").join(dylib_name).is_file()); } #[cargo_test] fn dashes_to_underscores() { let p = project() .file("Cargo.toml", &basic_manifest("foo-bar", "0.0.1")) .file("src/lib.rs", "") .file("src/main.rs", "extern crate foo_bar; fn main() {}") .build(); p.cargo("build -v").run(); assert!(p.bin("foo-bar").is_file()); } #[cargo_test] fn dashes_in_crate_name_bad() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [lib] name = "foo-bar" "#, ) .file("src/lib.rs", "") .file("src/main.rs", "extern crate foo_bar; fn main() {}") .build(); p.cargo("build -v") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]/foo/Cargo.toml` Caused by: library target names cannot contain hyphens: foo-bar ", ) .run(); } #[cargo_test] fn rustc_env_var() { let p = project().file("src/lib.rs", "").build(); p.cargo("build -v") .env("RUSTC", "rustc-that-does-not-exist") .with_status(101) .with_stderr( "\ [ERROR] could not execute process `rustc-that-does-not-exist -vV` ([..]) Caused by: [..] ", ) .run(); assert!(!p.bin("a").is_file()); } #[cargo_test] fn filtering() { let p = project() .file("src/lib.rs", "") .file("src/bin/a.rs", "fn main() {}") .file("src/bin/b.rs", "fn main() {}") .file("examples/a.rs", "fn main() {}") .file("examples/b.rs", "fn main() {}") .build(); p.cargo("build --lib").run(); assert!(!p.bin("a").is_file()); p.cargo("build --bin=a --example=a").run(); assert!(p.bin("a").is_file()); assert!(!p.bin("b").is_file()); assert!(p.bin("examples/a").is_file()); assert!(!p.bin("examples/b").is_file()); } #[cargo_test] fn filtering_implicit_bins() { let p = project() .file("src/lib.rs", "") .file("src/bin/a.rs", "fn main() {}") .file("src/bin/b.rs", "fn main() {}") .file("examples/a.rs", "fn main() {}") .file("examples/b.rs", "fn main() {}") .build(); p.cargo("build --bins").run(); assert!(p.bin("a").is_file()); assert!(p.bin("b").is_file()); assert!(!p.bin("examples/a").is_file()); assert!(!p.bin("examples/b").is_file()); } #[cargo_test] fn filtering_implicit_examples() { let p = project() .file("src/lib.rs", "") .file("src/bin/a.rs", "fn main() {}") .file("src/bin/b.rs", "fn main() {}") .file("examples/a.rs", "fn main() {}") .file("examples/b.rs", "fn main() {}") .build(); p.cargo("build --examples").run(); assert!(!p.bin("a").is_file()); assert!(!p.bin("b").is_file()); assert!(p.bin("examples/a").is_file()); assert!(p.bin("examples/b").is_file()); } #[cargo_test] fn ignore_dotfile() { let p = project() .file("src/bin/.a.rs", "") .file("src/bin/a.rs", "fn main() {}") .build(); p.cargo("build").run(); } #[cargo_test] fn ignore_dotdirs() { let p = project() .file("src/bin/a.rs", "fn main() {}") .file(".git/Cargo.toml", "") .file(".pc/dummy-fix.patch/Cargo.toml", "") .build(); p.cargo("build").run(); } #[cargo_test] fn dotdir_root() { let p = ProjectBuilder::new(root().join(".foo")) .file("src/bin/a.rs", "fn main() {}") .build(); p.cargo("build").run(); } #[cargo_test] fn custom_target_dir_env() { let p = project().file("src/main.rs", 
"fn main() {}").build(); let exe_name = format!("foo{}", env::consts::EXE_SUFFIX); p.cargo("build").env("CARGO_TARGET_DIR", "foo/target").run(); assert!(p.root().join("foo/target/debug").join(&exe_name).is_file()); assert!(!p.root().join("target/debug").join(&exe_name).is_file()); p.cargo("build").run(); assert!(p.root().join("foo/target/debug").join(&exe_name).is_file()); assert!(p.root().join("target/debug").join(&exe_name).is_file()); p.cargo("build") .env("CARGO_BUILD_TARGET_DIR", "foo2/target") .run(); assert!(p.root().join("foo2/target/debug").join(&exe_name).is_file()); p.change_file( ".cargo/config", r#" [build] target-dir = "foo/target" "#, ); p.cargo("build").env("CARGO_TARGET_DIR", "bar/target").run(); assert!(p.root().join("bar/target/debug").join(&exe_name).is_file()); assert!(p.root().join("foo/target/debug").join(&exe_name).is_file()); assert!(p.root().join("target/debug").join(&exe_name).is_file()); } #[cargo_test] fn custom_target_dir_line_parameter() { let p = project().file("src/main.rs", "fn main() {}").build(); let exe_name = format!("foo{}", env::consts::EXE_SUFFIX); p.cargo("build --target-dir foo/target").run(); assert!(p.root().join("foo/target/debug").join(&exe_name).is_file()); assert!(!p.root().join("target/debug").join(&exe_name).is_file()); p.cargo("build").run(); assert!(p.root().join("foo/target/debug").join(&exe_name).is_file()); assert!(p.root().join("target/debug").join(&exe_name).is_file()); p.change_file( ".cargo/config", r#" [build] target-dir = "foo/target" "#, ); p.cargo("build --target-dir bar/target").run(); assert!(p.root().join("bar/target/debug").join(&exe_name).is_file()); assert!(p.root().join("foo/target/debug").join(&exe_name).is_file()); assert!(p.root().join("target/debug").join(&exe_name).is_file()); p.cargo("build --target-dir foobar/target") .env("CARGO_TARGET_DIR", "bar/target") .run(); assert!(p .root() .join("foobar/target/debug") .join(&exe_name) .is_file()); assert!(p.root().join("bar/target/debug").join(&exe_name).is_file()); assert!(p.root().join("foo/target/debug").join(&exe_name).is_file()); assert!(p.root().join("target/debug").join(&exe_name).is_file()); } #[cargo_test] fn build_multiple_packages() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [dependencies.d1] path = "d1" [dependencies.d2] path = "d2" [[bin]] name = "foo" "#, ) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .file("d1/Cargo.toml", &basic_bin_manifest("d1")) .file("d1/src/lib.rs", "") .file("d1/src/main.rs", "fn main() { println!(\"d1\"); }") .file( "d2/Cargo.toml", r#" [package] name = "d2" version = "0.0.1" authors = [] [[bin]] name = "d2" doctest = false "#, ) .file("d2/src/main.rs", "fn main() { println!(\"d2\"); }") .build(); p.cargo("build -p d1 -p d2 -p foo").run(); assert!(p.bin("foo").is_file()); p.process(&p.bin("foo")).with_stdout("i am foo\n").run(); let d1_path = &p .build_dir() .join("debug") .join(format!("d1{}", env::consts::EXE_SUFFIX)); let d2_path = &p .build_dir() .join("debug") .join(format!("d2{}", env::consts::EXE_SUFFIX)); assert!(d1_path.is_file()); p.process(d1_path).with_stdout("d1").run(); assert!(d2_path.is_file()); p.process(d2_path).with_stdout("d2").run(); } #[cargo_test] fn invalid_spec() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [dependencies.d1] path = "d1" [[bin]] name = "foo" "#, ) .file("src/bin/foo.rs", &main_file(r#""i am foo""#, &[])) .file("d1/Cargo.toml", &basic_bin_manifest("d1")) 
.file("d1/src/lib.rs", "") .file("d1/src/main.rs", "fn main() { println!(\"d1\"); }") .build(); p.cargo("build -p notAValidDep") .with_status(101) .with_stderr("[ERROR] package ID specification `notAValidDep` did not match any packages") .run(); p.cargo("build -p d1 -p notAValidDep") .with_status(101) .with_stderr("[ERROR] package ID specification `notAValidDep` did not match any packages") .run(); } #[cargo_test] fn manifest_with_bom_is_ok() { let p = project() .file( "Cargo.toml", "\u{FEFF} [package] name = \"foo\" version = \"0.0.1\" authors = [] ", ) .file("src/lib.rs", "") .build(); p.cargo("build -v").run(); } #[cargo_test] fn panic_abort_compiles_with_panic_abort() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [profile.dev] panic = 'abort' "#, ) .file("src/lib.rs", "") .build(); p.cargo("build -v") .with_stderr_contains("[..] -C panic=abort [..]") .run(); } #[cargo_test] fn compiler_json_error_format() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = ["[email protected]"] [dependencies.bar] path = "bar" "#, ) .file( "build.rs", "fn main() { println!(\"cargo:rustc-cfg=xyz\") }", ) .file("src/main.rs", "fn main() { let unused = 92; }") .file("bar/Cargo.toml", &basic_manifest("bar", "0.5.0")) .file("bar/src/lib.rs", r#"fn dead() {}"#) .build(); let output = |fresh| { r#" { "reason":"compiler-artifact", "package_id":"foo 0.5.0 ([..])", "manifest_path": "[..]", "target":{ "kind":["custom-build"], "crate_types":["bin"], "doc": false, "doctest": false, "edition": "2015", "name":"build-script-build", "src_path":"[..]build.rs", "test": false }, "profile": { "debug_assertions": true, "debuginfo": 2, "opt_level": "0", "overflow_checks": true, "test": false }, "executable": null, "features": [], "filenames": "{...}", "fresh": $FRESH } { "reason":"compiler-message", "package_id":"bar 0.5.0 ([..])", "manifest_path": "[..]", "target":{ "kind":["lib"], "crate_types":["lib"], "doc": true, "doctest": true, "edition": "2015", "name":"bar", "src_path":"[..]lib.rs", "test": true }, "message":"{...}" } { "reason":"compiler-artifact", "profile": { "debug_assertions": true, "debuginfo": 2, "opt_level": "0", "overflow_checks": true, "test": false }, "executable": null, "features": [], "package_id":"bar 0.5.0 ([..])", "manifest_path": "[..]", "target":{ "kind":["lib"], "crate_types":["lib"], "doc": true, "doctest": true, "edition": "2015", "name":"bar", "src_path":"[..]lib.rs", "test": true }, "filenames":[ "[..].rlib", "[..].rmeta" ], "fresh": $FRESH } { "reason":"build-script-executed", "package_id":"foo 0.5.0 ([..])", "linked_libs":[], "linked_paths":[], "env":[], "cfgs":["xyz"], "out_dir": "[..]target/debug/build/foo-[..]/out" } { "reason":"compiler-message", "package_id":"foo 0.5.0 ([..])", "manifest_path": "[..]", "target":{ "kind":["bin"], "crate_types":["bin"], "doc": true, "doctest": false, "edition": "2015", "name":"foo", "src_path":"[..]main.rs", "test": true }, "message":"{...}" } { "reason":"compiler-artifact", "package_id":"foo 0.5.0 ([..])", "manifest_path": "[..]", "target":{ "kind":["bin"], "crate_types":["bin"], "doc": true, "doctest": false, "edition": "2015", "name":"foo", "src_path":"[..]main.rs", "test": true }, "profile": { "debug_assertions": true, "debuginfo": 2, "opt_level": "0", "overflow_checks": true, "test": false }, "executable": "[..]/foo/target/debug/foo[EXE]", "features": [], "filenames": "{...}", "fresh": $FRESH } {"reason": "build-finished", "success": true} "# 
.replace("$FRESH", fresh) }; // Use `jobs=1` to ensure that the order of messages is consistent. p.cargo("build -v --message-format=json --jobs=1") .with_json_contains_unordered(&output("false")) .run(); // With fresh build, we should repeat the artifacts, // and replay the cached compiler warnings. p.cargo("build -v --message-format=json --jobs=1") .with_json_contains_unordered(&output("true")) .run(); } #[cargo_test] fn wrong_message_format_option() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/main.rs", "fn main() {}") .build(); p.cargo("build --message-format XML") .with_status(101) .with_stderr_contains( "\ error: invalid message format specifier: `xml` ", ) .run(); } #[cargo_test] fn message_format_json_forward_stderr() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/main.rs", "fn main() { let unused = 0; }") .build(); p.cargo("rustc --release --bin foo --message-format JSON") .with_json_contains_unordered( r#" { "reason":"compiler-message", "package_id":"foo 0.5.0 ([..])", "manifest_path": "[..]", "target":{ "kind":["bin"], "crate_types":["bin"], "doc": true, "doctest": false, "edition": "2015", "name":"foo", "src_path":"[..]", "test": true }, "message":"{...}" } { "reason":"compiler-artifact", "package_id":"foo 0.5.0 ([..])", "manifest_path": "[..]", "target":{ "kind":["bin"], "crate_types":["bin"], "doc": true, "doctest": false, "edition": "2015", "name":"foo", "src_path":"[..]", "test": true }, "profile":{ "debug_assertions":false, "debuginfo":null, "opt_level":"3", "overflow_checks": false, "test":false }, "executable": "{...}", "features":[], "filenames": "{...}", "fresh": false } {"reason": "build-finished", "success": true} "#, ) .run(); } #[cargo_test] fn no_warn_about_package_metadata() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] [package.metadata] foo = "bar" a = true b = 3 [package.metadata.another] bar = 3 "#, ) .file("src/lib.rs", "") .build(); p.cargo("build") .with_stderr( "[..] foo v0.0.1 ([..])\n\ [FINISHED] dev [unoptimized + debuginfo] target(s) in [..]\n", ) .run(); } #[cargo_test] fn no_warn_about_workspace_metadata() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["foo"] [workspace.metadata] something = "something_else" x = 1 y = 2 [workspace.metadata.another] bar = 12 "#, ) .file( "foo/Cargo.toml", r#" [package] name = "foo" version = "0.0.1" "#, ) .file("foo/src/lib.rs", "") .build(); p.cargo("build") .with_stderr( "[..] foo v0.0.1 ([..])\n\ [FINISHED] dev [unoptimized + debuginfo] target(s) in [..]\n", ) .run(); } #[cargo_test] fn cargo_build_empty_target() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/main.rs", "fn main() {}") .build(); p.cargo("build --target") .arg("") .with_status(101) .with_stderr_contains("[..] target was empty") .run(); } #[cargo_test] fn build_all_workspace() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" [dependencies] bar = { path = "bar" } [workspace] "#, ) .file("src/main.rs", "fn main() {}") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .build(); p.cargo("build --workspace") .with_stderr( "\ [COMPILING] bar v0.1.0 ([..]) [COMPILING] foo v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); } #[cargo_test] fn build_all_exclude() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" [workspace] members = ["bar", "baz"] "#, ) .file("src/main.rs", "fn main() {}") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .file("baz/Cargo.toml", &basic_manifest("baz", "0.1.0")) .file("baz/src/lib.rs", "pub fn baz() { break_the_build(); }") .build(); p.cargo("build --workspace --exclude baz") .with_stderr_does_not_contain("[COMPILING] baz v0.1.0 [..]") .with_stderr_unordered( "\ [COMPILING] foo v0.1.0 ([..]) [COMPILING] bar v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); } #[cargo_test] fn build_all_exclude_not_found() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" [workspace] members = ["bar"] "#, ) .file("src/main.rs", "fn main() {}") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .build(); p.cargo("build --workspace --exclude baz") .with_stderr_does_not_contain("[COMPILING] baz v0.1.0 [..]") .with_stderr_unordered( "\ [WARNING] excluded package(s) `baz` not found in workspace [..] [COMPILING] foo v0.1.0 ([..]) [COMPILING] bar v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); } #[cargo_test] fn build_all_exclude_glob() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" [workspace] members = ["bar", "baz"] "#, ) .file("src/main.rs", "fn main() {}") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .file("baz/Cargo.toml", &basic_manifest("baz", "0.1.0")) .file("baz/src/lib.rs", "pub fn baz() { break_the_build(); }") .build(); p.cargo("build --workspace --exclude '*z'") .with_stderr_does_not_contain("[COMPILING] baz v0.1.0 [..]") .with_stderr_unordered( "\ [COMPILING] foo v0.1.0 ([..]) [COMPILING] bar v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); } #[cargo_test] fn build_all_exclude_glob_not_found() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" [workspace] members = ["bar"] "#, ) .file("src/main.rs", "fn main() {}") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .build(); p.cargo("build --workspace --exclude '*z'") .with_stderr_does_not_contain("[COMPILING] baz v0.1.0 [..]") .with_stderr( "\ [WARNING] excluded package pattern(s) `*z` not found in workspace [..] [COMPILING] [..] v0.1.0 ([..]) [COMPILING] [..] v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); } #[cargo_test] fn build_all_exclude_broken_glob() { let p = project().file("src/main.rs", "fn main() {}").build(); p.cargo("build --workspace --exclude '[*z'") .with_status(101) .with_stderr_contains("[ERROR] cannot build glob pattern from `[*z`") .run(); } #[cargo_test] fn build_all_workspace_implicit_examples() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" [dependencies] bar = { path = "bar" } [workspace] "#, ) .file("src/lib.rs", "") .file("src/bin/a.rs", "fn main() {}") .file("src/bin/b.rs", "fn main() {}") .file("examples/c.rs", "fn main() {}") .file("examples/d.rs", "fn main() {}") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "") .file("bar/src/bin/e.rs", "fn main() {}") .file("bar/src/bin/f.rs", "fn main() {}") .file("bar/examples/g.rs", "fn main() {}") .file("bar/examples/h.rs", "fn main() {}") .build(); p.cargo("build --workspace --examples") .with_stderr( "[..] Compiling bar v0.1.0 ([..])\n\ [..] Compiling foo v0.1.0 ([..])\n\ [..] Finished dev [unoptimized + debuginfo] target(s) in [..]\n", ) .run(); assert!(!p.bin("a").is_file()); assert!(!p.bin("b").is_file()); assert!(p.bin("examples/c").is_file()); assert!(p.bin("examples/d").is_file()); assert!(!p.bin("e").is_file()); assert!(!p.bin("f").is_file()); assert!(p.bin("examples/g").is_file()); assert!(p.bin("examples/h").is_file()); } #[cargo_test] fn build_all_virtual_manifest() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["bar", "baz"] "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .file("baz/Cargo.toml", &basic_manifest("baz", "0.1.0")) .file("baz/src/lib.rs", "pub fn baz() {}") .build(); // The order in which bar and baz are built is not guaranteed p.cargo("build --workspace") .with_stderr_unordered( "\ [COMPILING] baz v0.1.0 ([..]) [COMPILING] bar v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); } #[cargo_test] fn build_virtual_manifest_all_implied() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["bar", "baz"] "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .file("baz/Cargo.toml", &basic_manifest("baz", "0.1.0")) .file("baz/src/lib.rs", "pub fn baz() {}") .build(); // The order in which `bar` and `baz` are built is not guaranteed. p.cargo("build") .with_stderr_unordered( "\ [COMPILING] baz v0.1.0 ([..]) [COMPILING] bar v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); } #[cargo_test] fn build_virtual_manifest_one_project() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["bar", "baz"] "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .file("baz/Cargo.toml", &basic_manifest("baz", "0.1.0")) .file("baz/src/lib.rs", "pub fn baz() { break_the_build(); }") .build(); p.cargo("build -p bar") .with_stderr_does_not_contain("[..]baz[..]") .with_stderr( "\ [COMPILING] bar v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); } #[cargo_test] fn build_virtual_manifest_glob() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["bar", "baz"] "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() { break_the_build(); }") .file("baz/Cargo.toml", &basic_manifest("baz", "0.1.0")) .file("baz/src/lib.rs", "pub fn baz() {}") .build(); p.cargo("build -p '*z'") .with_stderr_does_not_contain("[..]bar[..]") .with_stderr( "\ [COMPILING] baz v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); } #[cargo_test] fn build_virtual_manifest_glob_not_found() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["bar"] "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .build(); p.cargo("build -p bar -p '*z'") .with_status(101) .with_stderr("[ERROR] package pattern(s) `*z` not found in workspace [..]") .run(); } #[cargo_test] fn build_virtual_manifest_broken_glob() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["bar"] "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .build(); p.cargo("build -p '[*z'") .with_status(101) .with_stderr_contains("[ERROR] cannot build glob pattern from `[*z`") .run(); } #[cargo_test] fn build_all_virtual_manifest_implicit_examples() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["bar", "baz"] "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "") .file("bar/src/bin/a.rs", "fn main() {}") .file("bar/src/bin/b.rs", "fn main() {}") .file("bar/examples/c.rs", "fn main() {}") .file("bar/examples/d.rs", "fn main() {}") .file("baz/Cargo.toml", &basic_manifest("baz", "0.1.0")) .file("baz/src/lib.rs", "") .file("baz/src/bin/e.rs", "fn main() {}") .file("baz/src/bin/f.rs", "fn main() {}") .file("baz/examples/g.rs", "fn main() {}") .file("baz/examples/h.rs", "fn main() {}") .build(); // The order in which bar and baz are built is not guaranteed p.cargo("build --workspace --examples") .with_stderr_unordered( "\ [COMPILING] baz v0.1.0 ([..]) [COMPILING] bar v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); assert!(!p.bin("a").is_file()); assert!(!p.bin("b").is_file()); assert!(p.bin("examples/c").is_file()); assert!(p.bin("examples/d").is_file()); assert!(!p.bin("e").is_file()); assert!(!p.bin("f").is_file()); assert!(p.bin("examples/g").is_file()); assert!(p.bin("examples/h").is_file()); } #[cargo_test] fn build_all_member_dependency_same_name() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["a"] "#, ) .file( "a/Cargo.toml", r#" [project] name = "a" version = "0.1.0" [dependencies] a = "0.1.0" "#, ) .file("a/src/lib.rs", "pub fn a() {}") .build(); Package::new("a", "0.1.0").publish(); p.cargo("build --workspace") .with_stderr( "[UPDATING] `[..]` index\n\ [DOWNLOADING] crates ...\n\ [DOWNLOADED] a v0.1.0 ([..])\n\ [COMPILING] a v0.1.0\n\ [COMPILING] a v0.1.0 ([..])\n\ [FINISHED] dev [unoptimized + debuginfo] target(s) in [..]\n", ) .run(); } #[cargo_test] fn run_proper_binary() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.0" [[bin]] name = "main" [[bin]] name = "other" "#, ) .file("src/lib.rs", "") .file( "src/bin/main.rs", r#"fn main() { panic!("This should never be run."); }"#, ) .file("src/bin/other.rs", "fn main() {}") .build(); p.cargo("run --bin other").run(); } #[cargo_test] fn run_proper_binary_main_rs() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/lib.rs", "") .file("src/bin/main.rs", "fn main() {}") .build(); p.cargo("run --bin foo").run(); } #[cargo_test] fn run_proper_alias_binary_from_src() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.0" [[bin]] name = "foo" [[bin]] name = "bar" "#, ) .file("src/foo.rs", r#"fn main() { println!("foo"); }"#) .file("src/bar.rs", r#"fn main() { println!("bar"); }"#) .build(); p.cargo("build --workspace").run(); p.process(&p.bin("foo")).with_stdout("foo\n").run(); p.process(&p.bin("bar")).with_stdout("bar\n").run(); } #[cargo_test] fn run_proper_alias_binary_main_rs() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" authors = [] version = "0.0.0" [[bin]] name = "foo" [[bin]] name = "bar" "#, ) .file("src/main.rs", r#"fn main() { println!("main"); }"#) .build(); p.cargo("build --workspace").run(); p.process(&p.bin("foo")).with_stdout("main\n").run(); p.process(&p.bin("bar")).with_stdout("main\n").run(); } #[cargo_test] fn run_proper_binary_main_rs_as_foo() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file( "src/foo.rs", r#" fn main() { panic!("This should never be run."); }"#, ) .file("src/main.rs", "fn main() {}") .build(); p.cargo("run --bin foo").run(); } #[cargo_test] fn rustc_wrapper() { let p = project().file("src/lib.rs", "").build(); let wrapper = tools::echo_wrapper(); let running = format!( "[RUNNING] `{} rustc --crate-name foo [..]", wrapper.display() ); p.cargo("build -v") .env("RUSTC_WRAPPER", &wrapper) .with_stderr_contains(&running) .run(); p.build_dir().rm_rf(); p.cargo("build -v") .env("RUSTC_WORKSPACE_WRAPPER", &wrapper) .with_stderr_contains(&running) .run(); } #[cargo_test] fn rustc_wrapper_relative() { Package::new("bar", "1.0.0").publish(); let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = "1.0" "#, ) .file("src/lib.rs", "") .build(); let wrapper = tools::echo_wrapper(); let exe_name = wrapper.file_name().unwrap().to_str().unwrap(); let relative_path = format!("./{}", exe_name); fs::hard_link(&wrapper, p.root().join(exe_name)).unwrap(); let running = 
format!("[RUNNING] `[ROOT]/foo/./{} rustc[..]", exe_name); p.cargo("build -v") .env("RUSTC_WRAPPER", &relative_path) .with_stderr_contains(&running) .run(); p.build_dir().rm_rf(); p.cargo("build -v") .env("RUSTC_WORKSPACE_WRAPPER", &relative_path) .with_stderr_contains(&running) .run(); p.build_dir().rm_rf(); p.change_file( ".cargo/config.toml", &format!( r#" build.rustc-wrapper = "./{}" "#, exe_name ), ); p.cargo("build -v").with_stderr_contains(&running).run(); } #[cargo_test] fn rustc_wrapper_from_path() { let p = project().file("src/lib.rs", "").build(); p.cargo("build -v") .env("RUSTC_WRAPPER", "wannabe_sccache") .with_status(101) .with_stderr_contains("[..]`wannabe_sccache rustc [..]") .run(); p.build_dir().rm_rf(); p.cargo("build -v") .env("RUSTC_WORKSPACE_WRAPPER", "wannabe_sccache") .with_status(101) .with_stderr_contains("[..]`wannabe_sccache rustc [..]") .run(); } #[cargo_test] fn cdylib_not_lifted() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo" authors = [] version = "0.1.0" [lib] crate-type = ["cdylib"] "#, ) .file("src/lib.rs", "") .build(); p.cargo("build").run(); let files = if cfg!(windows) { if cfg!(target_env = "msvc") { vec!["foo.dll.lib", "foo.dll.exp", "foo.dll"] } else { vec!["libfoo.dll.a", "foo.dll"] } } else if cfg!(target_os = "macos") { vec!["libfoo.dylib"] } else { vec!["libfoo.so"] }; for file in files { println!("checking: {}", file); assert!(p.root().join("target/debug/deps").join(&file).is_file()); } } #[cargo_test] fn cdylib_final_outputs() { let p = project() .file( "Cargo.toml", r#" [project] name = "foo-bar" authors = [] version = "0.1.0" [lib] crate-type = ["cdylib"] "#, ) .file("src/lib.rs", "") .build(); p.cargo("build").run(); let files = if cfg!(windows) { if cfg!(target_env = "msvc") { vec!["foo_bar.dll.lib", "foo_bar.dll"] } else { vec!["foo_bar.dll", "libfoo_bar.dll.a"] } } else if cfg!(target_os = "macos") { vec!["libfoo_bar.dylib"] } else { vec!["libfoo_bar.so"] }; for file in files { println!("checking: {}", file); assert!(p.root().join("target/debug").join(&file).is_file()); } } #[cargo_test] // NOTE: Windows MSVC and wasm32-unknown-emscripten do not use metadata. Skip them. // See <https://github.com/rust-lang/cargo/issues/9325#issuecomment-1030662699> #[cfg(not(all(target_os = "windows", target_env = "msvc")))] fn no_dep_info_collision_when_cdylib_and_bin_coexist() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "1.0.0" [lib] crate-type = ["cdylib"] "#, ) .file("src/main.rs", "fn main() {}") .file("src/lib.rs", "") .build(); p.cargo("build -v") .with_stderr_unordered( "\ [COMPILING] foo v1.0.0 ([CWD]) [RUNNING] `rustc [..] --crate-type bin [..] -C metadata=[..]` [RUNNING] `rustc [..] --crate-type cdylib [..] -C metadata=[..]` [FINISHED] [..] ", ) .run(); let deps_dir = p.target_debug_dir().join("deps"); assert!(deps_dir.join("foo.d").exists()); let dep_info_count = deps_dir .read_dir() .unwrap() .filter(|e| { let filename = e.as_ref().unwrap().file_name(); let filename = filename.to_str().unwrap(); filename.starts_with("foo") && filename.ends_with(".d") }) .count(); // cdylib -> foo.d // bin -> foo-<meta>.d assert_eq!(dep_info_count, 2); } #[cargo_test] fn deterministic_cfg_flags() { // This bug is non-deterministic. 
let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" authors = [] build = "build.rs" [features] default = ["f_a", "f_b", "f_c", "f_d"] f_a = [] f_b = [] f_c = [] f_d = [] "#, ) .file( "build.rs", r#" fn main() { println!("cargo:rustc-cfg=cfg_a"); println!("cargo:rustc-cfg=cfg_b"); println!("cargo:rustc-cfg=cfg_c"); println!("cargo:rustc-cfg=cfg_d"); println!("cargo:rustc-cfg=cfg_e"); } "#, ) .file("src/main.rs", "fn main() {}") .build(); p.cargo("build -v") .with_stderr( "\ [COMPILING] foo v0.1.0 [..] [RUNNING] [..] [RUNNING] [..] [RUNNING] `rustc --crate-name foo [..] \ --cfg[..]default[..]--cfg[..]f_a[..]--cfg[..]f_b[..]\ --cfg[..]f_c[..]--cfg[..]f_d[..] \ --cfg cfg_a --cfg cfg_b --cfg cfg_c --cfg cfg_d --cfg cfg_e` [FINISHED] dev [unoptimized + debuginfo] target(s) in [..]", ) .run(); } #[cargo_test] fn explicit_bins_without_paths() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" authors = [] [[bin]] name = "foo" [[bin]] name = "bar" "#, ) .file("src/lib.rs", "") .file("src/main.rs", "fn main() {}") .file("src/bin/bar.rs", "fn main() {}") .build(); p.cargo("build").run(); } #[cargo_test] fn no_bin_in_src_with_lib() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/lib.rs", "") .file("src/foo.rs", "fn main() {}") .build(); p.cargo("build") .with_status(101) .with_stderr_contains( "\ [ERROR] failed to parse manifest at `[..]` Caused by: can't find `foo` bin at `src/bin/foo.rs` or `src/bin/foo/main.rs`. [..]", ) .run(); } #[cargo_test] fn inferred_bins() { let p = project() .file("src/main.rs", "fn main() {}") .file("src/bin/bar.rs", "fn main() {}") .file("src/bin/baz/main.rs", "fn main() {}") .build(); p.cargo("build").run(); assert!(p.bin("foo").is_file()); assert!(p.bin("bar").is_file()); assert!(p.bin("baz").is_file()); } #[cargo_test] fn inferred_bins_duplicate_name() { // this should fail, because we have two binaries with the same name let p = project() .file("src/main.rs", "fn main() {}") .file("src/bin/bar.rs", "fn main() {}") .file("src/bin/bar/main.rs", "fn main() {}") .build(); p.cargo("build").with_status(101).with_stderr_contains( "[..]found duplicate binary name bar, but all binary targets must have a unique name[..]", ) .run(); } #[cargo_test] fn inferred_bin_path() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" authors = [] [[bin]] name = "bar" # Note, no `path` key! 
"#, ) .file("src/bin/bar/main.rs", "fn main() {}") .build(); p.cargo("build").run(); assert!(p.bin("bar").is_file()); } #[cargo_test] fn inferred_examples() { let p = project() .file("src/lib.rs", "fn main() {}") .file("examples/bar.rs", "fn main() {}") .file("examples/baz/main.rs", "fn main() {}") .build(); p.cargo("build --examples").run(); assert!(p.bin("examples/bar").is_file()); assert!(p.bin("examples/baz").is_file()); } #[cargo_test] fn inferred_tests() { let p = project() .file("src/lib.rs", "fn main() {}") .file("tests/bar.rs", "fn main() {}") .file("tests/baz/main.rs", "fn main() {}") .build(); p.cargo("test --test=bar --test=baz").run(); } #[cargo_test] fn inferred_benchmarks() { let p = project() .file("src/lib.rs", "fn main() {}") .file("benches/bar.rs", "fn main() {}") .file("benches/baz/main.rs", "fn main() {}") .build(); p.cargo("bench --bench=bar --bench=baz").run(); } #[cargo_test] fn target_edition() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" [lib] edition = "2018" "#, ) .file("src/lib.rs", "") .build(); p.cargo("build -v") .with_stderr_contains( "\ [COMPILING] foo v0.0.1 ([..]) [RUNNING] `rustc [..]--edition=2018 [..] ", ) .run(); } #[cargo_test] fn target_edition_override() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = [] edition = "2018" [lib] edition = "2015" "#, ) .file( "src/lib.rs", " pub fn async() {} pub fn try() {} pub fn await() {} ", ) .build(); p.cargo("build -v").run(); } #[cargo_test] fn same_metadata_different_directory() { // A top-level crate built in two different workspaces should have the // same metadata hash. let p = project() .at("foo1") .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); let output = t!(String::from_utf8( t!(p.cargo("build -v").exec_with_output()).stderr, )); let metadata = output .split_whitespace() .find(|arg| arg.starts_with("metadata=")) .unwrap(); let p = project() .at("foo2") .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("build -v") .with_stderr_contains(format!("[..]{}[..]", metadata)) .run(); } #[cargo_test] fn building_a_dependent_crate_witout_bin_should_fail() { Package::new("testless", "0.1.0") .file( "Cargo.toml", r#" [project] name = "testless" version = "0.1.0" [[bin]] name = "a_bin" "#, ) .file("src/lib.rs", "") .publish(); let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" [dependencies] testless = "0.1.0" "#, ) .file("src/lib.rs", "") .build(); p.cargo("build") .with_status(101) .with_stderr_contains( "[..]can't find `a_bin` bin at `src/bin/a_bin.rs` or `src/bin/a_bin/main.rs`[..]", ) .run(); } #[cargo_test] #[cfg(any(target_os = "macos", target_os = "ios"))] fn uplift_dsym_of_bin_on_mac() { let p = project() .file("src/main.rs", "fn main() { panic!(); }") .file("src/bin/b.rs", "fn main() { panic!(); }") .file("examples/c.rs", "fn main() { panic!(); }") .file("tests/d.rs", "fn main() { panic!(); }") .build(); p.cargo("build --bins --examples --tests") .enable_mac_dsym() .run(); assert!(p.target_debug_dir().join("foo.dSYM").is_dir()); assert!(p.target_debug_dir().join("b.dSYM").is_dir()); assert!(p.target_debug_dir().join("b.dSYM").is_symlink()); assert!(p.target_debug_dir().join("examples/c.dSYM").is_dir()); assert!(!p.target_debug_dir().join("c.dSYM").exists()); assert!(!p.target_debug_dir().join("d.dSYM").exists()); } #[cargo_test] #[cfg(any(target_os = 
"macos", target_os = "ios"))] fn uplift_dsym_of_bin_on_mac_when_broken_link_exists() { let p = project() .file("src/main.rs", "fn main() { panic!(); }") .build(); let dsym = p.target_debug_dir().join("foo.dSYM"); p.cargo("build").enable_mac_dsym().run(); assert!(dsym.is_dir()); // Simulate the situation where the underlying dSYM bundle goes missing // but the uplifted symlink to it remains. This would previously cause // builds to permanently fail until the bad symlink was manually removed. dsym.rm_rf(); p.symlink( p.target_debug_dir() .join("deps") .join("foo-baaaaaadbaaaaaad.dSYM"), &dsym, ); assert!(dsym.is_symlink()); assert!(!dsym.exists()); p.cargo("build").enable_mac_dsym().run(); assert!(dsym.is_dir()); } #[cargo_test] #[cfg(all(target_os = "windows", target_env = "msvc"))] fn uplift_pdb_of_bin_on_windows() { let p = project() .file("src/main.rs", "fn main() { panic!(); }") .file("src/bin/b.rs", "fn main() { panic!(); }") .file("src/bin/foo-bar.rs", "fn main() { panic!(); }") .file("examples/c.rs", "fn main() { panic!(); }") .file("tests/d.rs", "fn main() { panic!(); }") .build(); p.cargo("build --bins --examples --tests").run(); assert!(p.target_debug_dir().join("foo.pdb").is_file()); assert!(p.target_debug_dir().join("b.pdb").is_file()); assert!(p.target_debug_dir().join("examples/c.pdb").exists()); assert!(p.target_debug_dir().join("foo-bar.exe").is_file()); assert!(p.target_debug_dir().join("foo_bar.pdb").is_file()); assert!(!p.target_debug_dir().join("c.pdb").exists()); assert!(!p.target_debug_dir().join("d.pdb").exists()); } // Ensure that `cargo build` chooses the correct profile for building // targets based on filters (assuming `--profile` is not specified). #[cargo_test] fn build_filter_infer_profile() { let p = project() .file("src/lib.rs", "") .file("src/main.rs", "fn main() {}") .file("tests/t1.rs", "") .file("benches/b1.rs", "") .file("examples/ex1.rs", "fn main() {}") .build(); p.cargo("build -v") .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ --emit=[..]link[..]", ) .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--crate-type bin \ --emit=[..]link[..]", ) .run(); p.root().join("target").rm_rf(); p.cargo("build -v --test=t1") .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ --emit=[..]link[..]-C debuginfo=2 [..]", ) .with_stderr_contains( "[RUNNING] `rustc --crate-name t1 tests/t1.rs [..]--emit=[..]link[..]\ -C debuginfo=2 [..]", ) .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--crate-type bin \ --emit=[..]link[..]-C debuginfo=2 [..]", ) .run(); p.root().join("target").rm_rf(); // Bench uses test profile without `--release`. p.cargo("build -v --bench=b1") .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ --emit=[..]link[..]-C debuginfo=2 [..]", ) .with_stderr_contains( "[RUNNING] `rustc --crate-name b1 benches/b1.rs [..]--emit=[..]link[..]\ -C debuginfo=2 [..]", ) .with_stderr_does_not_contain("opt-level") .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--crate-type bin \ --emit=[..]link[..]-C debuginfo=2 [..]", ) .run(); } #[cargo_test] fn targets_selected_default() { let p = project().file("src/main.rs", "fn main() {}").build(); p.cargo("build -v") // Binaries. .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--crate-type bin \ --emit=[..]link[..]", ) // Benchmarks. 
.with_stderr_does_not_contain( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link \ -C opt-level=3 --test [..]", ) // Unit tests. .with_stderr_does_not_contain( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link[..]\ -C debuginfo=2 --test [..]", ) .run(); } #[cargo_test] fn targets_selected_all() { let p = project().file("src/main.rs", "fn main() {}").build(); p.cargo("build -v --all-targets") // Binaries. .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--crate-type bin \ --emit=[..]link[..]", ) // Unit tests. .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link[..]\ -C debuginfo=2 --test [..]", ) .run(); } #[cargo_test] fn all_targets_no_lib() { let p = project().file("src/main.rs", "fn main() {}").build(); p.cargo("build -v --all-targets") // Binaries. .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--crate-type bin \ --emit=[..]link[..]", ) // Unit tests. .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link[..]\ -C debuginfo=2 --test [..]", ) .run(); } #[cargo_test] fn no_linkable_target() { // Issue 3169: this is currently not an error as per discussion in PR #4797. let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" authors = [] [dependencies] the_lib = { path = "the_lib" } "#, ) .file("src/main.rs", "fn main() {}") .file( "the_lib/Cargo.toml", r#" [package] name = "the_lib" version = "0.1.0" [lib] name = "the_lib" crate-type = ["staticlib"] "#, ) .file("the_lib/src/lib.rs", "pub fn foo() {}") .build(); p.cargo("build") .with_stderr_contains( "[WARNING] The package `the_lib` provides no linkable [..] \ while compiling `foo`. [..] in `the_lib`'s Cargo.toml. [..]", ) .run(); } #[cargo_test] fn avoid_dev_deps() { Package::new("foo", "1.0.0").publish(); let p = project() .file( "Cargo.toml", r#" [package] name = "bar" version = "0.1.0" authors = [] [dev-dependencies] baz = "1.0.0" "#, ) .file("src/main.rs", "fn main() {}") .build(); p.cargo("build") .with_status(101) .with_stderr( "\ [UPDATING] [..] 
[ERROR] no matching package named `baz` found location searched: registry `crates-io` required by package `bar v0.1.0 ([..]/foo)` ", ) .run(); p.cargo("build -Zavoid-dev-deps") .masquerade_as_nightly_cargo() .run(); } #[cargo_test] fn default_cargo_config_jobs() { let p = project() .file("src/lib.rs", "") .file( ".cargo/config", r#" [build] jobs = 1 "#, ) .build(); p.cargo("build -v").run(); } #[cargo_test] fn good_cargo_config_jobs() { let p = project() .file("src/lib.rs", "") .file( ".cargo/config", r#" [build] jobs = 4 "#, ) .build(); p.cargo("build -v").run(); } #[cargo_test] fn invalid_cargo_config_jobs() { let p = project() .file("src/lib.rs", "") .file( ".cargo/config", r#" [build] jobs = 0 "#, ) .build(); p.cargo("build -v") .with_status(101) .with_stderr_contains("error: jobs may not be 0") .run(); } #[cargo_test] fn invalid_jobs() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("build --jobs -1") .with_status(1) .with_stderr_contains( "error: Found argument '-1' which wasn't expected, or isn't valid in this context", ) .run(); p.cargo("build --jobs over9000") .with_status(1) .with_stderr("error: Invalid value: could not parse `over9000` as a number") .run(); } #[cargo_test] fn target_filters_workspace() { let ws = project() .at("ws") .file( "Cargo.toml", r#" [workspace] members = ["a", "b"] "#, ) .file("a/Cargo.toml", &basic_lib_manifest("a")) .file("a/src/lib.rs", "") .file("a/examples/ex1.rs", "fn main() {}") .file("b/Cargo.toml", &basic_bin_manifest("b")) .file("b/src/lib.rs", "") .file("b/src/main.rs", "fn main() {}") .build(); ws.cargo("build -v --example ex") .with_status(101) .with_stderr( "\ [ERROR] no example target named `ex` <tab>Did you mean `ex1`?", ) .run(); ws.cargo("build -v --example 'ex??'") .with_status(101) .with_stderr( "\ [ERROR] no example target matches pattern `ex??` <tab>Did you mean `ex1`?", ) .run(); ws.cargo("build -v --lib") .with_stderr_contains("[RUNNING] `rustc [..]a/src/lib.rs[..]") .with_stderr_contains("[RUNNING] `rustc [..]b/src/lib.rs[..]") .run(); ws.cargo("build -v --example ex1") .with_stderr_contains("[RUNNING] `rustc [..]a/examples/ex1.rs[..]") .run(); } #[cargo_test] fn target_filters_workspace_not_found() { let ws = project() .at("ws") .file( "Cargo.toml", r#" [workspace] members = ["a", "b"] "#, ) .file("a/Cargo.toml", &basic_bin_manifest("a")) .file("a/src/main.rs", "fn main() {}") .file("b/Cargo.toml", &basic_bin_manifest("b")) .file("b/src/main.rs", "fn main() {}") .build(); ws.cargo("build -v --lib") .with_status(101) .with_stderr("[ERROR] no library targets found in packages: a, b") .run(); } #[cfg(unix)] #[cargo_test] fn signal_display() { // Cause the compiler to crash with a signal. let foo = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] pm = { path = "pm" } "#, ) .file( "src/lib.rs", r#" #[macro_use] extern crate pm; #[derive(Foo)] pub struct S; "#, ) .file( "pm/Cargo.toml", r#" [package] name = "pm" version = "0.1.0" [lib] proc-macro = true "#, ) .file( "pm/src/lib.rs", r#" extern crate proc_macro; use proc_macro::TokenStream; #[proc_macro_derive(Foo)] pub fn derive(_input: TokenStream) -> TokenStream { std::process::abort() } "#, ) .build(); foo.cargo("build") .with_stderr( "\ [COMPILING] pm [..] [COMPILING] foo [..] 
[ERROR] could not compile `foo` Caused by: process didn't exit successfully: `rustc [..]` (signal: 6, SIGABRT: process abort signal) ", ) .with_status(101) .run(); } #[cargo_test] fn tricky_pipelining() { let foo = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = { path = "bar" } "#, ) .file("src/lib.rs", "extern crate bar;") .file("bar/Cargo.toml", &basic_lib_manifest("bar")) .file("bar/src/lib.rs", "") .build(); foo.cargo("build -p bar").run(); foo.cargo("build -p foo").run(); } #[cargo_test] fn pipelining_works() { let foo = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = { path = "bar" } "#, ) .file("src/lib.rs", "extern crate bar;") .file("bar/Cargo.toml", &basic_lib_manifest("bar")) .file("bar/src/lib.rs", "") .build(); foo.cargo("build") .with_stdout("") .with_stderr( "\ [COMPILING] [..] [COMPILING] [..] [FINISHED] [..] ", ) .run(); } #[cargo_test] fn pipelining_big_graph() { // Create a crate graph of the form {a,b}{0..29}, where {a,b}(n) depend on {a,b}(n+1) // Then have `foo`, a binary crate, depend on the whole thing. let mut project = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] a1 = { path = "a1" } b1 = { path = "b1" } "#, ) .file("src/main.rs", "fn main(){}"); for n in 0..30 { for x in &["a", "b"] { project = project .file( &format!("{x}{n}/Cargo.toml", x = x, n = n), &format!( r#" [package] name = "{x}{n}" version = "0.1.0" [dependencies] a{np1} = {{ path = "../a{np1}" }} b{np1} = {{ path = "../b{np1}" }} "#, x = x, n = n, np1 = n + 1 ), ) .file(&format!("{x}{n}/src/lib.rs", x = x, n = n), ""); } } let foo = project .file("a30/Cargo.toml", &basic_lib_manifest("a30")) .file( "a30/src/lib.rs", r#"compile_error!("don't actually build me");"#, ) .file("b30/Cargo.toml", &basic_lib_manifest("b30")) .file("b30/src/lib.rs", "") .build(); foo.cargo("build -p foo") .with_status(101) .with_stderr_contains("[ERROR] could not compile `a30`[..]") .run(); } #[cargo_test] fn forward_rustc_output() { let foo = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" edition = '2018' [dependencies] bar = { path = "bar" } "#, ) .file("src/lib.rs", "bar::foo!();") .file( "bar/Cargo.toml", r#" [package] name = "bar" version = "0.1.0" [lib] proc-macro = true "#, ) .file( "bar/src/lib.rs", r#" extern crate proc_macro; use proc_macro::*; #[proc_macro] pub fn foo(input: TokenStream) -> TokenStream { println!("a"); println!("b"); println!("{{}}"); eprintln!("c"); eprintln!("d"); eprintln!("{{a"); // "malformed json" input } "#, ) .build(); foo.cargo("build") .with_stdout("a\nb\n{}") .with_stderr( "\ [COMPILING] [..] [COMPILING] [..] c d {a [FINISHED] [..] ", ) .run(); } #[cargo_test] fn build_lib_only() { let p = project() .file("src/main.rs", "fn main() {}") .file("src/lib.rs", r#" "#) .build(); p.cargo("build --lib -v") .with_stderr( "\ [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] 
\ -L dependency=[CWD]/target/debug/deps` [FINISHED] dev [unoptimized + debuginfo] target(s) in [..]", ) .run(); } #[cargo_test] fn build_with_no_lib() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/main.rs", "fn main() {}") .build(); p.cargo("build --lib") .with_status(101) .with_stderr("[ERROR] no library targets found in package `foo`") .run(); } #[cargo_test] fn build_with_relative_cargo_home_path() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.0.1" authors = ["[email protected]"] [dependencies] "test-dependency" = { path = "src/test_dependency" } "#, ) .file("src/main.rs", "fn main() {}") .file("src/test_dependency/src/lib.rs", r#" "#) .file( "src/test_dependency/Cargo.toml", &basic_manifest("test-dependency", "0.0.1"), ) .build(); p.cargo("build").env("CARGO_HOME", "./cargo_home/").run(); } #[cargo_test] fn user_specific_cfgs_are_filtered_out() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/main.rs", r#"fn main() {}"#) .file( "build.rs", r#" fn main() { assert!(std::env::var_os("CARGO_CFG_PROC_MACRO").is_none()); assert!(std::env::var_os("CARGO_CFG_DEBUG_ASSERTIONS").is_none()); } "#, ) .build(); p.cargo("rustc -- --cfg debug_assertions --cfg proc_macro") .run(); p.process(&p.bin("foo")).run(); } #[cargo_test] fn close_output() { // What happens when stdout or stderr is closed during a build. // Server to know when rustc has spawned. let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); let addr = listener.local_addr().unwrap(); let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" edition = "2018" [lib] proc-macro = true [[bin]] name = "foobar" "#, ) .file( "src/lib.rs", &r#" use proc_macro::TokenStream; use std::io::Read; #[proc_macro] pub fn repro(_input: TokenStream) -> TokenStream { println!("hello stdout!"); eprintln!("hello stderr!"); // Tell the test we have started. let mut socket = std::net::TcpStream::connect("__ADDR__").unwrap(); // Wait for the test to tell us to start printing. let mut buf = [0]; drop(socket.read_exact(&mut buf)); let use_stderr = std::env::var("__CARGO_REPRO_STDERR").is_ok(); // Emit at least 1MB of data. // Linux pipes can buffer up to 64KB. // This test seems to be sensitive to having other threads // calling fork. My hypothesis is that the stdout/stderr // file descriptors are duplicated into the child process, // and during the short window between fork and exec, the // file descriptor is kept alive long enough for the // build to finish. It's a half-baked theory, but this // seems to prevent the spurious errors in CI. // An alternative solution is to run this test in // a single-threaded environment. for i in 0..100000 { if use_stderr { eprintln!("0123456789{}", i); } else { println!("0123456789{}", i); } } TokenStream::new() } "# .replace("__ADDR__", &addr.to_string()), ) .file( "src/bin/foobar.rs", r#" foo::repro!(); fn main() {} "#, ) .build(); // The `stderr` flag here indicates if this should forcefully close stderr or stdout. let spawn = |stderr: bool| { let mut cmd = p.cargo("build").build_command(); cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); if stderr { cmd.env("__CARGO_REPRO_STDERR", "1"); } let mut child = cmd.spawn().unwrap(); // Wait for proc macro to start. let pm_conn = listener.accept().unwrap().0; // Close stderr or stdout. 
if stderr { drop(child.stderr.take()); } else { drop(child.stdout.take()); } // Tell the proc-macro to continue; drop(pm_conn); // Read the output from the other channel. let out: &mut dyn Read = if stderr { child.stdout.as_mut().unwrap() } else { child.stderr.as_mut().unwrap() }; let mut result = String::new(); out.read_to_string(&mut result).unwrap(); let status = child.wait().unwrap(); assert!(!status.success()); result }; let stderr = spawn(false); compare::match_unordered( "\ [COMPILING] foo [..] hello stderr! [ERROR] [..] [WARNING] build failed, waiting for other jobs to finish... ", &stderr, None, ) .unwrap(); // Try again with stderr. p.build_dir().rm_rf(); let stdout = spawn(true); assert_eq!(stdout, "hello stdout!\n"); } #[cargo_test] fn close_output_during_drain() { // Test to close the output during the build phase (drain_the_queue). // There was a bug where it would hang. // Server to know when rustc has spawned. let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); let addr = listener.local_addr().unwrap(); // Create a wrapper so the test can know when compiling has started. let rustc_wrapper = { let p = project() .at("compiler") .file("Cargo.toml", &basic_manifest("compiler", "1.0.0")) .file( "src/main.rs", &r#" use std::process::Command; use std::env; use std::io::Read; fn main() { // Only wait on the first dependency. if matches!(env::var("CARGO_PKG_NAME").as_deref(), Ok("dep")) { let mut socket = std::net::TcpStream::connect("__ADDR__").unwrap(); // Wait for the test to tell us to start printing. let mut buf = [0]; drop(socket.read_exact(&mut buf)); } let mut cmd = Command::new("rustc"); for arg in env::args_os().skip(1) { cmd.arg(arg); } std::process::exit(cmd.status().unwrap().code().unwrap()); } "# .replace("__ADDR__", &addr.to_string()), ) .build(); p.cargo("build").run(); p.bin("compiler") }; Package::new("dep", "1.0.0").publish(); let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] dep = "1.0" "#, ) .file("src/lib.rs", "") .build(); // Spawn cargo, wait for the first rustc to start, and then close stderr. let mut cmd = process(&cargo_exe()) .arg("check") .cwd(p.root()) .env("RUSTC", rustc_wrapper) .build_command(); cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); let mut child = cmd.spawn().expect("cargo should spawn"); // Wait for the rustc wrapper to start. let rustc_conn = listener.accept().unwrap().0; // Close stderr to force an error. drop(child.stderr.take()); // Tell the wrapper to continue. 
drop(rustc_conn); match child.wait() { Ok(status) => assert!(!status.success()), Err(e) => panic!("child wait failed: {}", e), } } use cargo_test_support::registry::Dependency; #[cargo_test] fn reduced_reproduction_8249() { // https://github.com/rust-lang/cargo/issues/8249 Package::new("a-src", "0.1.0").links("a").publish(); Package::new("a-src", "0.2.0").links("a").publish(); Package::new("b", "0.1.0") .add_dep(Dependency::new("a-src", "0.1").optional(true)) .publish(); Package::new("b", "0.2.0") .add_dep(Dependency::new("a-src", "0.2").optional(true)) .publish(); Package::new("c", "1.0.0") .add_dep(&Dependency::new("b", "0.1.0")) .publish(); let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] b = { version = "*", features = ["a-src"] } a-src = "*" "#, ) .file("src/lib.rs", "") .build(); p.cargo("generate-lockfile").run(); cargo_util::paths::append(&p.root().join("Cargo.toml"), b"c = \"*\"").unwrap(); p.cargo("check").run(); p.cargo("check").run(); } #[cargo_test] fn target_directory_backup_exclusion() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); // Newly created target/ should have CACHEDIR.TAG inside... p.cargo("build").run(); let cachedir_tag = p.build_dir().join("CACHEDIR.TAG"); assert!(cachedir_tag.is_file()); assert!(fs::read_to_string(&cachedir_tag) .unwrap() .starts_with("Signature: 8a477f597d28d172789f06886806bc55")); // ...but if target/ already exists CACHEDIR.TAG should not be created in it. fs::remove_file(&cachedir_tag).unwrap(); p.cargo("build").run(); assert!(!&cachedir_tag.is_file()); } #[cargo_test] fn simple_terminal_width() { if !is_nightly() { // --terminal-width is unstable return; } let p = project() .file( "src/lib.rs", r#" fn main() { let _: () = 42; } "#, ) .build(); p.cargo("build -Zterminal-width=20") .masquerade_as_nightly_cargo() .with_status(101) .with_stderr_contains("3 | ..._: () = 42;") .run(); } #[cargo_test] fn build_script_o0_default() { let p = project() .file("src/lib.rs", "") .file("build.rs", "fn main() {}") .build(); p.cargo("build -v --release") .with_stderr_does_not_contain("[..]build_script_build[..]opt-level[..]") .run(); } #[cargo_test] fn build_script_o0_default_even_with_release() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [profile.release] opt-level = 1 "#, ) .file("src/lib.rs", "") .file("build.rs", "fn main() {}") .build(); p.cargo("build -v --release") .with_stderr_does_not_contain("[..]build_script_build[..]opt-level[..]") .run(); } #[cargo_test] fn primary_package_env_var() { // Test that CARGO_PRIMARY_PACKAGE is enabled only for "foo" and not for any dependency. 
let is_primary_package = r#" pub fn is_primary_package() -> bool {{ option_env!("CARGO_PRIMARY_PACKAGE").is_some() }} "#; Package::new("qux", "0.1.0") .file("src/lib.rs", is_primary_package) .publish(); let baz = git::new("baz", |project| { project .file("Cargo.toml", &basic_manifest("baz", "0.1.0")) .file("src/lib.rs", is_primary_package) }); let foo = project() .file( "Cargo.toml", &format!( r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = {{ path = "bar" }} baz = {{ git = '{}' }} qux = "0.1" "#, baz.url() ), ) .file( "src/lib.rs", &format!( r#" extern crate bar; extern crate baz; extern crate qux; {} #[test] fn verify_primary_package() {{ assert!(!bar::is_primary_package()); assert!(!baz::is_primary_package()); assert!(!qux::is_primary_package()); assert!(is_primary_package()); }} "#, is_primary_package ), ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", is_primary_package) .build(); foo.cargo("test").run(); } #[cfg_attr(windows, ignore)] // weird normalization issue with windows and cargo-test-support #[cargo_test] fn check_cfg_features() { if !is_nightly() { // --check-cfg is a nightly only rustc command line return; } let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" [features] f_a = [] f_b = [] "#, ) .file("src/main.rs", "fn main() {}") .build(); p.cargo("build -v -Z check-cfg-features") .masquerade_as_nightly_cargo() .with_stderr( "\ [COMPILING] foo v0.1.0 [..] [RUNNING] `rustc [..] --check-cfg 'values(feature, \"f_a\", \"f_b\")' [..] [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); } #[cfg_attr(windows, ignore)] // weird normalization issue with windows and cargo-test-support #[cargo_test] fn check_cfg_features_with_deps() { if !is_nightly() { // --check-cfg is a nightly only rustc command line return; } let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" [dependencies] bar = { path = "bar/" } [features] f_a = [] f_b = [] "#, ) .file("src/main.rs", "fn main() {}") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "#[allow(dead_code)] fn bar() {}") .build(); p.cargo("build -v -Z check-cfg-features") .masquerade_as_nightly_cargo() .with_stderr( "\ [COMPILING] bar v0.1.0 [..] [RUNNING] `rustc [..] --check-cfg 'values(feature)' [..] [COMPILING] foo v0.1.0 [..] [RUNNING] `rustc --crate-name foo [..] --check-cfg 'values(feature, \"f_a\", \"f_b\")' [..] [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); } #[cfg_attr(windows, ignore)] // weird normalization issue with windows and cargo-test-support #[cargo_test] fn check_cfg_features_with_opt_deps() { if !is_nightly() { // --check-cfg is a nightly only rustc command line return; } let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" [dependencies] bar = { path = "bar/", optional = true } [features] default = ["bar"] f_a = [] f_b = [] "#, ) .file("src/main.rs", "fn main() {}") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "#[allow(dead_code)] fn bar() {}") .build(); p.cargo("build -v -Z check-cfg-features") .masquerade_as_nightly_cargo() .with_stderr( "\ [COMPILING] bar v0.1.0 [..] [RUNNING] `rustc [..] --check-cfg 'values(feature)' [..] [COMPILING] foo v0.1.0 [..] [RUNNING] `rustc --crate-name foo [..] --check-cfg 'values(feature, \"bar\", \"default\", \"f_a\", \"f_b\")' [..] [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); } #[cfg_attr(windows, ignore)] // weird normalization issue with windows and cargo-test-support #[cargo_test] fn check_cfg_features_with_namespaced_features() { if !is_nightly() { // --check-cfg is a nightly only rustc command line return; } let p = project() .file( "Cargo.toml", r#" [project] name = "foo" version = "0.1.0" [dependencies] bar = { path = "bar/", optional = true } [features] f_a = ["dep:bar"] f_b = [] "#, ) .file("src/main.rs", "fn main() {}") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "#[allow(dead_code)] fn bar() {}") .build(); p.cargo("build -v -Z check-cfg-features") .masquerade_as_nightly_cargo() .with_stderr( "\ [COMPILING] foo v0.1.0 [..] [RUNNING] `rustc --crate-name foo [..] --check-cfg 'values(feature, \"f_a\", \"f_b\")' [..] [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); }
it_examples() {
host_style_bucket_test.go
package s3_test import ( "net/url" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/awstesting/unit" "github.com/aws/aws-sdk-go/service/s3" ) type s3BucketTest struct { bucket string url string errCode string } var ( sslTests = []s3BucketTest{ {"abc", "https://abc.s3.mock-region.amazonaws.com/", ""}, {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c", ""}, {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c", ""}, {"a..bc", "https://s3.mock-region.amazonaws.com/a..bc", ""}, } nosslTests = []s3BucketTest{ {"a.b.c", "http://a.b.c.s3.mock-region.amazonaws.com/", ""}, {"a..bc", "http://s3.mock-region.amazonaws.com/a..bc", ""}, } forcepathTests = []s3BucketTest{ {"abc", "https://s3.mock-region.amazonaws.com/abc", ""}, {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c", ""}, {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c", ""}, {"a..bc", "https://s3.mock-region.amazonaws.com/a..bc", ""}, } accelerateTests = []s3BucketTest{ {"abc", "https://abc.s3-accelerate.amazonaws.com/", ""}, {"a.b.c", "https://s3.mock-region.amazonaws.com/%7BBucket%7D", "InvalidParameterException"}, {"a$b$c", "https://s3.mock-region.amazonaws.com/%7BBucket%7D", "InvalidParameterException"}, } accelerateNoSSLTests = []s3BucketTest{ {"abc", "http://abc.s3-accelerate.amazonaws.com/", ""}, {"a.b.c", "http://a.b.c.s3-accelerate.amazonaws.com/", ""}, {"a$b$c", "http://s3.mock-region.amazonaws.com/%7BBucket%7D", "InvalidParameterException"}, } accelerateDualstack = []s3BucketTest{ {"abc", "https://abc.s3-accelerate.dualstack.amazonaws.com/", ""}, {"a.b.c", "https://s3.dualstack.mock-region.amazonaws.com/%7BBucket%7D", "InvalidParameterException"}, {"a$b$c", "https://s3.dualstack.mock-region.amazonaws.com/%7BBucket%7D", "InvalidParameterException"}, } ) func runTests(t *testing.T, svc *s3.S3, tests []s3BucketTest) { for i, test := range tests { req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{Bucket: &test.bucket}) req.Build() assert.Equal(t, test.url, req.HTTPRequest.URL.String(), "test case %d", i) if test.errCode != "" { require.Error(t, req.Error, "test case %d", i) assert.Contains(t, req.Error.(awserr.Error).Code(), test.errCode, "test case %d", i) } } } func TestAccelerateBucketBuild(t *testing.T) { s := s3.New(unit.Session, &aws.Config{S3UseAccelerate: aws.Bool(true)}) runTests(t, s, accelerateTests) } func TestAccelerateNoSSLBucketBuild(t *testing.T) { s := s3.New(unit.Session, &aws.Config{S3UseAccelerate: aws.Bool(true), DisableSSL: aws.Bool(true)}) runTests(t, s, accelerateNoSSLTests) } func TestAccelerateDualstackBucketBuild(t *testing.T) { s := s3.New(unit.Session, &aws.Config{ S3UseAccelerate: aws.Bool(true), UseDualStack: aws.Bool(true), }) runTests(t, s, accelerateDualstack) } func TestHostStyleBucketBuild(t *testing.T) { s := s3.New(unit.Session) runTests(t, s, sslTests) } func TestHostStyleBucketBuildNoSSL(t *testing.T) { s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)}) runTests(t, s, nosslTests)
s := s3.New(unit.Session, &aws.Config{S3ForcePathStyle: aws.Bool(true)}) runTests(t, s, forcepathTests) } func TestHostStyleBucketGetBucketLocation(t *testing.T) { s := s3.New(unit.Session) req, _ := s.GetBucketLocationRequest(&s3.GetBucketLocationInput{ Bucket: aws.String("bucket"), }) req.Build() require.NoError(t, req.Error) u, _ := url.Parse(req.HTTPRequest.URL.String()) assert.NotContains(t, u.Host, "bucket") assert.Contains(t, u.Path, "bucket") }
} func TestPathStyleBucketBuild(t *testing.T) {
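For reference, a minimal sketch of the client configurations exercised by the table-driven tests above; the region and the standalone program are illustrative and not part of the test file:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))

	// Virtual-hosted style is the default (sslTests / nosslTests).
	_ = s3.New(sess)
	// Path-style addressing, as exercised by forcepathTests.
	_ = s3.New(sess, &aws.Config{S3ForcePathStyle: aws.Bool(true)})
	// Transfer acceleration over dualstack endpoints, as exercised by accelerateDualstack.
	_ = s3.New(sess, &aws.Config{S3UseAccelerate: aws.Bool(true), UseDualStack: aws.Bool(true)})
}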
world.js
'use strict'; var request = require('supertest'); var app = require('../../app.js'); function
() { this.request = function () { return request(app); } } module.exports = function() { this.World = World; };
World
CreateManyMovieResolver.ts
import * as TypeGraphQL from "type-graphql"; import { CreateManyMovieArgs } from "./args/CreateManyMovieArgs"; import { Movie } from "../../../models/Movie"; import { AffectedRowsOutput } from "../../outputs/AffectedRowsOutput"; @TypeGraphQL.Resolver(_of => Movie) export class
{ @TypeGraphQL.Mutation(_returns => AffectedRowsOutput, { nullable: false }) async createManyMovie(@TypeGraphQL.Ctx() ctx: any, @TypeGraphQL.Args() args: CreateManyMovieArgs): Promise<AffectedRowsOutput> { return ctx.prisma.movie.createMany(args); } }
CreateManyMovieResolver
utils_test.go
package checkpoints
import ( "fmt" "strings" "testing" ) func TestFindCheckpointDefinition_Markdown(t *testing.T) { result := []string{} for _, def := range AllCheckpoints { row := "## " + def.Name + "\n* 前缀:`${" + def.Prefix + "}`\n* 描述:" + def.Description if def.HasParams { row += "\n* 是否有子参数:YES" paramOptions := def.Instance.ParamOptions() if paramOptions != nil && len(paramOptions.Options) > 0 { row += "\n* 可选子参数" for _, option := range paramOptions.Options { row += "\n * `" + option.Name + "`:值为 `" + option.Value + "`" } } } else { row += "\n* 是否有子参数:NO" } row += "\n" result = append(result, row) } fmt.Print(strings.Join(result, "\n") + "\n") }
test_form_mutation.py
import unittest from unittest import TestCase import mechanize def first_form(text, base_uri="http://example.com/"): return mechanize.ParseString(text, base_uri)[0] class MutationTests(TestCase):
if __name__ == "__main__": unittest.main()
def test_add_textfield(self): form = first_form('<input type="text" name="foo" value="bar" />') more = first_form('<input type="text" name="spam" value="eggs" />') combined = form.controls + more.controls for control in more.controls: control.add_to_form(form) self.assertEquals(form.controls, combined)
ServiceProduct.js
import { request } from '../utils' import config from "../../config" export async function asyncGetAllProduct(params) { return request(config.index.allProduct, { method: 'get', data:{} }) } export async function asyncGetProductVersion(params) { return request(config.index.versionList, { method: 'get', data:params }) } export async function
(params) { return request(config.index.allVersionImage, { method: 'get', data:params }) }
asyncGetAllImages
main.rs
mod api; mod asset; mod handlers; mod iex; mod portfolio; mod processor; mod slack; mod user; use warp::Filter; #[tokio::main] async fn
() { pretty_env_logger::init(); let api = api::compose_api().with(warp::log("exchange")); warp::serve(api).run(([127, 0, 0, 1], 8000)).await; }
main
easystepper.go
// Simple driver to rotate a 4-wire stepper motor package easystepper // import "tinygo.org/x/drivers/easystepper" import ( "machine" "time" ) // Device holds the pins and the delay between steps type Device struct { pins [4]machine.Pin stepDelay int32 stepNumber int32 } // New returns a new easystepper driver given four pins, the number of steps // per revolution, and the speed in rpm func New(pin1, pin2, pin3, pin4 machine.Pin, steps int32, rpm int32) Device
// Move rotates the motor the number of given steps // (negative steps will rotate it the opposite direction) func (d *Device) Move(steps int32) { direction := steps > 0 if steps < 0 { steps = -steps - d.stepNumber } else { steps += d.stepNumber } var stepN int8 var s int32 for s = d.stepNumber; s < steps; s++ { time.Sleep(time.Duration(d.stepDelay) * time.Microsecond) if direction { stepN = int8(s % 4) } else { stepN = int8((s + 2*(s%2)) % 4) } d.stepMotor(stepN) } d.stepNumber = int32(stepN) } // stepMotor changes the pins' state to the correct step func (d *Device) stepMotor(step int8) { switch step { case 0: d.pins[0].High() d.pins[1].Low() d.pins[2].High() d.pins[3].Low() break case 1: d.pins[0].Low() d.pins[1].High() d.pins[2].High() d.pins[3].Low() break case 2: d.pins[0].Low() d.pins[1].High() d.pins[2].Low() d.pins[3].High() break case 3: d.pins[0].High() d.pins[1].Low() d.pins[2].Low() d.pins[3].High() break } }
{ pin1.Configure(machine.PinConfig{Mode: machine.PinOutput}) pin2.Configure(machine.PinConfig{Mode: machine.PinOutput}) pin3.Configure(machine.PinConfig{Mode: machine.PinOutput}) pin4.Configure(machine.PinConfig{Mode: machine.PinOutput}) return Device{ pins: [4]machine.Pin{pin1, pin2, pin3, pin4}, stepDelay: 60000000 / (steps * rpm), } }
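A minimal usage sketch for the easystepper driver above; the pin names (D8–D11), the 2048 steps per revolution, and the 15 RPM are placeholder values for a typical geared stepper, not taken from the driver itself:

package main

import (
	"machine"

	"tinygo.org/x/drivers/easystepper"
)

func main() {
	// Placeholder pins; use whatever pins your board exposes.
	motor := easystepper.New(machine.D8, machine.D9, machine.D10, machine.D11, 2048, 15)

	// One full revolution forward, then half a revolution back.
	motor.Move(2048)
	motor.Move(-1024)
}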
bmt.go
// Copyright (c) 2018 The Ecosystem Authors // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php // Package bmt provides a binary merkle tree implementation package bmt import ( "fmt" "hash" "io" "strings" "sync" "sync/atomic" ) /* Binary Merkle Tree Hash is a hash function over arbitrary data chunks of limited size. It is defined as the root hash of the binary merkle tree built over fixed size segments of the underlying chunk using any base hash function (e.g. keccak 256 SHA3). It is used as the chunk hash function in swarm which in turn is the basis for the 128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash The BMT is optimal for providing compact inclusion proofs, i.e. proving that a segment is a substring of a chunk starting at a particular offset. The size of the underlying segments is fixed at 32 bytes (called the resolution of the BMT hash), the EVM word size to optimize for on-chain BMT verification as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash. Two implementations are provided: * RefHasher is optimized for code simplicity and meant as a reference implementation * Hasher is optimized for speed taking advantage of concurrency with minimalistic control structure to coordinate the concurrent routines It implements the ChunkHash interface as well as the Go standard hash.Hash interface */ const ( // DefaultSegmentCount is the maximum number of segments of the underlying chunk DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches // DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e., // the maximum number of concurrent BMT hashing operations performed by the same hasher DefaultPoolSize = 8 ) // BaseHasher is a hash.Hash constructor function used for the base hash of the BMT. 
type BaseHasher func() hash.Hash // Hasher is a reusable hasher for fixed maximum size chunks representing a BMT // implements the hash.Hash interface // reuse pool of Tree-s for amortised memory allocation and resource control // supports order-agnostic concurrent segment writes // as well as sequential read and write // cannot be called concurrently on more than one chunk // can be further appended after Sum // Reset gives back the Tree to the pool and is guaranteed to leave // the tree and itself in a state reusable for hashing a new chunk type Hasher struct { pool *TreePool // BMT resource pool bmt *Tree // prebuilt BMT resource for flow control and proofs blocksize int // segment size (size of hash) also for hash.Hash count int // segment count size int // for hash.Hash same as hashsize cur int // cursor position for rightmost currently open chunk segment []byte // the rightmost open segment (not complete) depth int // index of last level result chan []byte // result channel hash []byte // to record the result max int32 // max segments for SegmentWriter interface blockLength []byte // The block length that needs to be added in Sum } // New creates a reusable Hasher // implements the hash.Hash interface // pulls a new Tree from a resource pool for hashing each chunk func New(p *TreePool) *Hasher { return &Hasher{ pool: p, depth: depth(p.SegmentCount), size: p.SegmentSize, blocksize: p.SegmentSize, count: p.SegmentCount, result: make(chan []byte), } } // Node is a reusable segment hasher representing a node in a BMT // it allows for continued writes after a Sum // and is left in completely reusable state after Reset type Node struct { level, index int // position of node for information/logging only initial bool // first and last node root bool // whether the node is root to a smaller BMT isLeft bool // whether it is left side of the parent double segment unbalanced bool // indicates if a node has only the left segment parent *Node // BMT connections state int32 // atomic increment impl concurrent boolean toggle left, right []byte } // NewNode constructor for segment hasher nodes in the BMT func NewNode(level, index int, parent *Node) *Node { return &Node{ parent: parent, level: level, index: index, initial: index == 0, isLeft: index%2 == 0, } } // TreePool provides a pool of Trees used as resources by Hasher // a Tree popped from the pool is guaranteed to have clean state // for hashing a new chunk // Hasher Reset releases the Tree to the pool type TreePool struct { lock sync.Mutex c chan *Tree hasher BaseHasher SegmentSize int SegmentCount int Capacity int count int } // NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity // on GetTree it reuses free Trees or creates a new one if size is not reached func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool { return &TreePool{ c: make(chan *Tree, capacity), hasher: hasher, SegmentSize: hasher().Size(), SegmentCount: segmentCount, Capacity: capacity, } } // Drain drains the pool until it has no more than n resources func (p *TreePool) Drain(n int) { p.lock.Lock() defer p.lock.Unlock() for len(p.c) > n { <-p.c p.count-- } } // Reserve is blocking until it returns an available Tree // it reuses free Trees or creates a new one if size is not reached func (p *TreePool) Reserve() *Tree { p.lock.Lock() defer p.lock.Unlock() var t *Tree if p.count == p.Capacity { return <-p.c } select { case t = <-p.c: default: t = NewTree(p.hasher, p.SegmentSize, p.SegmentCount) p.count++ } return t } // Release 
gives back a Tree to the pool. // This Tree is guaranteed to be in reusable state // does not need locking func (p *TreePool) Release(t *Tree) { p.c <- t // can never fail but... } // Tree is a reusable control structure representing a BMT // organised in a binary tree // Hasher uses a TreePool to pick one for each chunk hash // the Tree is 'locked' while not in the pool type Tree struct { leaves []*Node } // Draw draws the BMT (badly) func (t *Tree) Draw(hash []byte, d int) string { var left, right []string var anc []*Node for i, n := range t.leaves { left = append(left, fmt.Sprintf("%v", hashstr(n.left))) if i%2 == 0 { anc = append(anc, n.parent) } right = append(right, fmt.Sprintf("%v", hashstr(n.right))) } anc = t.leaves var hashes [][]string for l := 0; len(anc) > 0; l++ { var nodes []*Node hash := []string{""} for i, n := range anc { hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right))) if i%2 == 0 && n.parent != nil { nodes = append(nodes, n.parent) } } hash = append(hash, "") hashes = append(hashes, hash) anc = nodes } hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""}) total := 60 del := " " var rows []string for i := len(hashes) - 1; i >= 0; i-- { var textlen int hash := hashes[i] for _, s := range hash { textlen += len(s) } if total < textlen { total = textlen + len(hash) } delsize := (total - textlen) / (len(hash) - 1) if delsize > len(del) { delsize = len(del) } row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize])) rows = append(rows, row) } rows = append(rows, strings.Join(left, " ")) rows = append(rows, strings.Join(right, " ")) return strings.Join(rows, "\n") + "\n" } // NewTree initialises the Tree by building up the nodes of a BMT // segment size is stipulated to be the size of the hash // segmentCount needs to be positive integer and does not need to be // a power of two and can even be an odd number // segmentSize * segmentCount determines the maximum chunk size // hashed using the tree func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree { n := NewNode(0, 0, nil) n.root = true prevlevel := []*Node{n} // iterate over levels and creates 2^level nodes level := 1 count := 2 for d := 1; d <= depth(segmentCount); d++ { nodes := make([]*Node, count) for i := 0; i < len(nodes); i++ { parent := prevlevel[i/2] t := NewNode(level, i, parent) nodes[i] = t } prevlevel = nodes level++ count *= 2 } // the datanode level is the nodes on the last level where return &Tree{ leaves: prevlevel, } } // methods needed by hash.Hash // Size returns the size func (h *Hasher) Size() int { return h.size } // BlockSize returns the block size func (h *Hasher) BlockSize() int { return h.blocksize } // Sum returns the hash of the buffer // hash.Hash interface Sum method appends the byte slice to the underlying // data before it calculates and returns the hash of the chunk func (h *Hasher) Sum(b []byte) (r []byte) { t := h.bmt i := h.cur n := t.leaves[i] j := i // must run strictly before all nodes calculate // datanodes are guaranteed to have a parent if len(h.segment) > h.size && i > 0 && n.parent != nil { n = n.parent } else { i *= 2 } d := h.finalise(n, i) h.writeSegment(j, h.segment, d) c := <-h.result h.releaseTree() // sha3(length + BMT(pure_chunk)) if h.blockLength == nil { return c } res := h.pool.hasher() res.Reset() res.Write(h.blockLength) res.Write(c) return res.Sum(nil) } // Hasher implements the SwarmHash interface // Hash waits for the hasher result and returns it // caller must call this on a BMT 
Hasher being written to func (h *Hasher) Hash() []byte { return <-h.result } // Hasher implements the io.Writer interface // Write fills the buffer to hash // with every full segment complete launches a hasher go routine // that shoots up the BMT func (h *Hasher) Write(b []byte) (int, error) { l := len(b) if l <= 0 { return 0, nil } s := h.segment i := h.cur count := (h.count + 1) / 2 need := h.count*h.size - h.cur*2*h.size size := h.size if need > size { size *= 2 } if l < need { need = l } // calculate missing bit to complete current open segment rest := size - len(s) if need < rest { rest = need } s = append(s, b[:rest]...) need -= rest // read full segments and the last possibly partial segment for need > 0 && i < count-1 { // push all finished chunks we read h.writeSegment(i, s, h.depth) need -= size if need < 0 { size += need } s = b[rest : rest+size] rest += size i++
h.cur = i // otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full return l, nil } // Hasher implements the io.ReaderFrom interface // ReadFrom reads from io.Reader and appends to the data to hash using Write // it reads so that chunk to hash is maximum length or reader reaches EOF // caller must Reset the hasher prior to call func (h *Hasher) ReadFrom(r io.Reader) (m int64, err error) { bufsize := h.size*h.count - h.size*h.cur - len(h.segment) buf := make([]byte, bufsize) var read int for { var n int n, err = r.Read(buf) read += n if err == io.EOF || read == len(buf) { hash := h.Sum(buf[:n]) if read == len(buf) { err = NewEOC(hash) } break } if err != nil { break } n, err = h.Write(buf[:n]) if err != nil { break } } return int64(read), err } // Reset needs to be called before writing to the hasher func (h *Hasher) Reset() { h.getTree() h.blockLength = nil } // Hasher implements the SwarmHash interface // ResetWithLength needs to be called before writing to the hasher // the argument is supposed to be the byte slice binary representation of // the length of the data subsumed under the hash func (h *Hasher) ResetWithLength(l []byte) { h.Reset() h.blockLength = l } // Release gives back the Tree to the pool whereby it unlocks // it resets tree, segment and index func (h *Hasher) releaseTree() { if h.bmt != nil { n := h.bmt.leaves[h.cur] for ; n != nil; n = n.parent { n.unbalanced = false if n.parent != nil { n.root = false } } h.pool.Release(h.bmt) h.bmt = nil } h.cur = 0 h.segment = nil } func (h *Hasher) writeSegment(i int, s []byte, d int) { hash := h.pool.hasher() n := h.bmt.leaves[i] if len(s) > h.size && n.parent != nil { go func() { hash.Reset() hash.Write(s) s = hash.Sum(nil) if n.root { h.result <- s return } h.run(n.parent, hash, d, n.index, s) }() return } go h.run(n, hash, d, i*2, s) } func (h *Hasher) run(n *Node, hash hash.Hash, d int, i int, s []byte) { isLeft := i%2 == 0 for { if isLeft { n.left = s } else { n.right = s } if !n.unbalanced && n.toggle() { return } if !n.unbalanced || !isLeft || i == 0 && d == 0 { hash.Reset() hash.Write(n.left) hash.Write(n.right) s = hash.Sum(nil) } else { s = append(n.left, n.right...) 
} h.hash = s if n.root { h.result <- s return } isLeft = n.isLeft n = n.parent i++ } } // getTree obtains a BMT resource by reserving one from the pool func (h *Hasher) getTree() *Tree { if h.bmt != nil { return h.bmt } t := h.pool.Reserve() h.bmt = t return t } // atomic bool toggle implementing a concurrent reusable 2-state object // atomic addint with %2 implements atomic bool toggle // it returns true if the toggler just put it in the active/waiting state func (n *Node) toggle() bool { return atomic.AddInt32(&n.state, 1)%2 == 1 } func hashstr(b []byte) string { end := len(b) if end > 4 { end = 4 } return fmt.Sprintf("%x", b[:end]) } func depth(n int) (d int) { for l := (n - 1) / 2; l > 0; l /= 2 { d++ } return d } // finalise is following the zigzags on the tree belonging // to the final datasegment func (h *Hasher) finalise(n *Node, i int) (d int) { isLeft := i%2 == 0 for { // when the final segment's path is going via left segments // the incoming data is pushed to the parent upon pulling the left // we do not need toggle the state since this condition is // detectable n.unbalanced = isLeft n.right = nil if n.initial { n.root = true return d } isLeft = n.isLeft n = n.parent d++ } } // EOC (end of chunk) implements the error interface type EOC struct { Hash []byte // read the hash of the chunk off the error } // Error returns the error string func (e *EOC) Error() string { return fmt.Sprintf("hasher limit reached, chunk hash: %x", e.Hash) } // NewEOC creates new end of chunk error with the hash func NewEOC(hash []byte) *EOC { return &EOC{hash} }
} h.segment = s
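A minimal sketch of driving the Hasher above; sha256 stands in here for the Keccak/SHA3 base hash the package is normally used with, and the example function is an assumption, not part of the original file:

package bmt

import (
	"crypto/sha256"
	"fmt"
	"hash"
)

// ExampleHasher hashes one full chunk of zero bytes with the pooled BMT hasher.
func ExampleHasher() {
	base := func() hash.Hash { return sha256.New() }

	pool := NewTreePool(base, DefaultSegmentCount, DefaultPoolSize)
	h := New(pool)

	data := make([]byte, DefaultSegmentCount*h.Size()) // 128 segments of 32 bytes
	h.Reset()
	h.Write(data)
	fmt.Printf("%x\n", h.Sum(nil))
}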
script_parameter_info.go
// Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved. // This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. // Code generated. DO NOT EDIT. // Application Performance Monitoring Synthetic Monitoring API //
// package apmsynthetics import ( "github.com/oracle/oci-go-sdk/v54/common" ) // ScriptParameterInfo Information about script parameters. // isOverwritten specifies that the default parameter present in the script content is overwritten. type ScriptParameterInfo struct { ScriptParameter *ScriptParameter `mandatory:"true" json:"scriptParameter"` // If parameter value is default or overwritten. IsOverwritten *bool `mandatory:"true" json:"isOverwritten"` } func (m ScriptParameterInfo) String() string { return common.PointerString(m) }
// Use the Application Performance Monitoring Synthetic Monitoring API to query synthetic scripts and monitors.
translate.rs
use packet_sniffer::UdpPacket; use photon_decode::Photon; use crate::game::Event; use crate::photon_messages::into_game_message; use crate::photon_messages::Message; use crate::game::World;
pub fn udp_packet_to_game_events(game_world: &mut World, photon: &mut Photon, packet: &UdpPacket) -> Vec<Event> { if ! is_packet_valid(packet) { return vec![] } debug!("Raw payload: {:?}", &packet.payload); raw_to_photon_messages(photon, &packet.payload) .into_iter() .map(|message| game_world.transform(message)) .flatten() .flatten() .collect() } fn raw_to_photon_messages(photon: &mut Photon, packet_payload: &[u8]) -> Vec<Message> { return photon .decode(packet_payload) .into_iter() .filter_map(into_game_message) .collect() } fn is_packet_valid(packet: &UdpPacket) -> bool { return packet.destination_port == GAME_PORT || packet.source_port == GAME_PORT; } #[cfg(test)] mod tests { }
use log::*; static GAME_PORT : u16 = 5056;
tests.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use super::{ super::tests::{build_prng, build_sequence_poly}, Assertion, BoundaryConstraint, }; use crypto::{hashers::Blake3_256, RandomCoin}; use math::{fields::f128::BaseElement, log2, polynom, FieldElement, StarkField}; use std::collections::HashMap; use utils::collections::Vec; // BOUNDARY CONSTRAINT TESTS // ================================================================================================ #[test] fn boundary_constraint_from_single_assertion() { let mut test_prng = build_prng(); let (inv_g, mut twiddle_map, mut prng) = build_constraint_params(16); // constraint should be built correctly for register 0, step 0 let value = BaseElement::rand(); let assertion = Assertion::single(0, 0, value); let constraint = BoundaryConstraint::<BaseElement, BaseElement>::new( assertion, inv_g, &mut twiddle_map, prng.draw_pair().unwrap(), ); assert_eq!(0, constraint.register()); assert_eq!(vec![value], constraint.poly()); assert_eq!((0, BaseElement::ONE), constraint.poly_offset()); assert_eq!(test_prng.draw_pair::<BaseElement>().unwrap(), constraint.cc); // single value constraints should evaluate to trace_value - value let trace_value = BaseElement::rand(); assert_eq!( trace_value - value, constraint.evaluate_at(BaseElement::rand(), trace_value) ); // constraint is build correctly for register 1 step 8 let value = BaseElement::rand(); let assertion = Assertion::single(1, 8, value); let constraint = BoundaryConstraint::<BaseElement, BaseElement>::new( assertion, inv_g, &mut twiddle_map, prng.draw_pair().unwrap(), ); assert_eq!(1, constraint.register()); assert_eq!(vec![value], constraint.poly()); assert_eq!((0, BaseElement::ONE), constraint.poly_offset()); assert_eq!(test_prng.draw_pair::<BaseElement>().unwrap(), constraint.cc); // single value constraints should evaluate to trace_value - value let trace_value = BaseElement::rand(); assert_eq!( trace_value - value, constraint.evaluate_at(BaseElement::rand(), trace_value) ); // twiddle map was not touched assert!(twiddle_map.is_empty()); } #[test] fn
() { let mut test_prng = build_prng(); let (inv_g, mut twiddle_map, mut prng) = build_constraint_params(16); // constraint should be built correctly for register 0, step 0, stride 4 let value = BaseElement::rand(); let assertion = Assertion::periodic(0, 0, 4, value); let constraint = BoundaryConstraint::<BaseElement, BaseElement>::new( assertion, inv_g, &mut twiddle_map, prng.draw_pair().unwrap(), ); assert_eq!(0, constraint.register()); assert_eq!(vec![value], constraint.poly()); assert_eq!((0, BaseElement::ONE), constraint.poly_offset()); assert_eq!(test_prng.draw_pair::<BaseElement>().unwrap(), constraint.cc); // periodic value constraints should evaluate to trace_value - value let trace_value = BaseElement::rand(); assert_eq!( trace_value - value, constraint.evaluate_at(BaseElement::rand(), trace_value) ); // constraint should be built correctly for register 2, first step 3, stride 8 let value = BaseElement::rand(); let assertion = Assertion::periodic(2, 3, 8, value); let constraint = BoundaryConstraint::<BaseElement, BaseElement>::new( assertion, inv_g, &mut twiddle_map, prng.draw_pair().unwrap(), ); assert_eq!(2, constraint.register()); assert_eq!(vec![value], constraint.poly()); assert_eq!((0, BaseElement::ONE), constraint.poly_offset()); assert_eq!(test_prng.draw_pair::<BaseElement>().unwrap(), constraint.cc); // periodic value constraints should evaluate to trace_value - value let trace_value = BaseElement::rand(); assert_eq!( trace_value - value, constraint.evaluate_at(BaseElement::rand(), trace_value) ); // twiddle map was not touched assert!(twiddle_map.is_empty()); } #[test] fn boundary_constraint_from_sequence_assertion() { let mut test_prng = build_prng(); let (inv_g, mut twiddle_map, mut prng) = build_constraint_params(16); // constraint should be built correctly for register 0, first step 0, stride 4 let values = BaseElement::prng_vector([0; 32], 4); let constraint_poly = build_sequence_poly(&values, 16); let assertion = Assertion::sequence(0, 0, 4, values); let constraint = BoundaryConstraint::<BaseElement, BaseElement>::new( assertion, inv_g, &mut twiddle_map, prng.draw_pair().unwrap(), ); assert_eq!(0, constraint.register()); assert_eq!(constraint_poly, constraint.poly()); assert_eq!((0, BaseElement::ONE), constraint.poly_offset()); assert_eq!(test_prng.draw_pair::<BaseElement>().unwrap(), constraint.cc); assert_eq!(1, twiddle_map.len()); // sequence value constraints with no offset should evaluate to // trace_value - constraint_poly(x) let x = BaseElement::rand(); let trace_value = BaseElement::rand(); assert_eq!( trace_value - polynom::eval(&constraint_poly, x), constraint.evaluate_at(x, trace_value) ); // constraint should be built correctly for register 0, first step 3, stride 8 let values = BaseElement::prng_vector([1; 32], 2); let constraint_poly = build_sequence_poly(&values, 16); let assertion = Assertion::sequence(0, 3, 8, values.clone()); let constraint = BoundaryConstraint::<BaseElement, BaseElement>::new( assertion, inv_g, &mut twiddle_map, prng.draw_pair().unwrap(), ); assert_eq!(0, constraint.register()); assert_eq!(constraint_poly, constraint.poly()); assert_eq!((3, inv_g.exp(3)), constraint.poly_offset()); assert_eq!(test_prng.draw_pair::<BaseElement>().unwrap(), constraint.cc); assert_eq!(2, twiddle_map.len()); // sequence value constraints with offset should evaluate to // trace_value - constraint_poly(x * offset) let x = BaseElement::rand(); let trace_value = BaseElement::rand(); assert_eq!( trace_value - polynom::eval(&constraint_poly, x * 
constraint.poly_offset().1), constraint.evaluate_at(x, trace_value) ); } // HELPER FUNCTIONS // ================================================================================================ fn build_constraint_params( trace_length: usize, ) -> ( BaseElement, HashMap<usize, Vec<BaseElement>>, RandomCoin<BaseElement, Blake3_256<BaseElement>>, ) { let inv_g = BaseElement::get_root_of_unity(log2(trace_length)).inv(); let prng = build_prng(); let twiddle_map = HashMap::<usize, Vec<BaseElement>>::new(); (inv_g, twiddle_map, prng) }
boundary_constraint_from_periodic_assertion
base16-ashes.py
# -*- coding: utf-8 -*- # base16-prompt-toolkit (https://github.com/memeplex/base16-prompt-toolkit) # Base16 Prompt Toolkit template by Carlos Pita ([email protected] # Ashes scheme by Jannik Siebert (https://github.com/janniks) try: # older than v2 from prompt_toolkit.output.vt100 import _256_colors except ModuleNotFoundError: # version 2 from prompt_toolkit.formatted_text.ansi import _256_colors from pygments.style import Style from pygments.token import (Keyword, Name, Comment, String, Error, Text, Number, Operator, Literal, Token) # See http://chriskempson.com/projects/base16/ for a description of the role # of the different colors in the base16 palette. base00 = '#1C2023' base01 = '#393F45' base02 = '#565E65' base03 = '#747C84' base04 = '#ADB3BA' base05 = '#C7CCD1' base06 = '#DFE2E5' base07 = '#F3F4F5' base08 = '#C7AE95' base09 = '#C7C795' base0A = '#AEC795' base0B = '#95C7AE' base0C = '#95AEC7' base0D = '#AE95C7' base0E = '#C795AE' base0F = '#C79595' # See https://github.com/jonathanslenders/python-prompt-toolkit/issues/355 colors = (globals()['base0' + d] for d in '08BADEC5379F1246') for i, color in enumerate(colors): r, g, b = int(color[1:3], 16), int(color[3:5], 16), int(color[5:], 16) _256_colors[r, g, b] = i + 6 if i > 8 else i # See http://pygments.org/docs/tokens/ for a description of the different # pygments tokens. class
(Style): background_color = base00 highlight_color = base02 default_style = base05 styles = { Text: base05, Error: '%s bold' % base08, Comment: base03, Keyword: base0E, Keyword.Constant: base09, Keyword.Namespace: base0D, Name.Builtin: base0D, Name.Function: base0D, Name.Class: base0D, Name.Decorator: base0E, Name.Exception: base08, Number: base09, Operator: base0E, Literal: base0B, String: base0B } # See https://github.com/jonathanslenders/python-prompt-toolkit/blob/master/prompt_toolkit/styles/defaults.py # for a description of prompt_toolkit related pseudo-tokens. overrides = { Token.Prompt: base0B, Token.PromptNum: '%s bold' % base0B, Token.OutPrompt: base08, Token.OutPromptNum: '%s bold' % base08, Token.Menu.Completions.Completion: 'bg:%s %s' % (base01, base04), Token.Menu.Completions.Completion.Current: 'bg:%s %s' % (base04, base01), Token.MatchingBracket.Other: 'bg:%s %s' % (base03, base00) }
Base16Style
client.go
package ccxtrest import ( "bytes" "encoding/json" "flag" "fmt" "github.com/ccxt/ccxt/go" "github.com/ccxt/ccxt/go/util" "io" "io/ioutil" "log" "net/http" "net/url" "path" ) var ( debug = flag.Bool("debug", false, "Log outgoing requests and responses to server") server = flag.String("server", "http://localhost:3000/", "ccxt-rest server addr") ) const ( endpointExchanges = "exchanges" ) func ListExchanges(cli *http.Client) ([]string, error) { if cli == nil { cli = http.DefaultClient } resp, err := cli.Get(*server + endpointExchanges) if err != nil { return nil, err } dec := json.NewDecoder(resp.Body) var exchanges []string return exchanges, dec.Decode(&exchanges) } // Exchange implements ccxt.Exchange interface. // It forwards requests to ccxt-rest server and // unwrap responses to relevant models from ccxt. type Exchange struct { ccxt.ExchangeInfo Key string ID string Markets map[string]ccxt.Market BaseURL string *http.Client } func NewExchange(exchange string, id string, key, secret string, cli *http.Client) (*Exchange, error) { x := &Exchange{ Client: cli, Key: exchange, ID: id, BaseURL: *server, } if x.Client == nil { x.Client = http.DefaultClient } return x, x.Init(key, secret) } func (x *Exchange) Init(key, secret string) error { params := map[string]interface{}{ "id": x.ID, "apiKey": key, "secret": secret, } return x.Post(path.Join(endpointExchanges, x.Key), &x.ExchangeInfo, params) } func (x *Exchange) Info() ccxt.ExchangeInfo { return x.ExchangeInfo } func (x *Exchange) FetchCurrencies() (c []ccxt.Currency, err error) { return c, x.Post(x.Path("fetchCurrencies"), &c, nil) } func (x *Exchange) FetchMarkets() (m []ccxt.Market, err error) { return m, x.Post(x.Path("fetchMarkets"), &m, nil) } func (x *Exchange) LoadMarkets(reload bool) (m map[string]ccxt.Market, err error) { if reload || x.Markets == nil { x.Markets, err = ccxt.LoadMarkets(x) } return x.Markets, err } func (x *Exchange) GetMarket(id string) (m ccxt.Market, err error) { if x.Markets == nil { _, err := x.LoadMarkets(false) if err != nil { return m, err } } m, ok := x.Markets[id] if !ok { err = ccxt.NotSupportedError(fmt.Sprintf("market %s not found for %s", id, x.Key)) } return m, err } func (x *Exchange) FetchTickers( symbols []string, params map[string]interface{}, ) (t map[string]ccxt.Ticker, err error) { return t, x.Post(x.Path("fetchTickers"), &t, []interface{}{symbols, params}) } func (x *Exchange) FetchTicker( symbol string, params map[string]interface{}, ) (t ccxt.Ticker, err error) { return t, x.Post(x.Path("fetchTicker"), &t, []interface{}{symbol, params}) } func (x *Exchange) FetchOHLCV( symbol string, timeframe string, since *util.JSONTime, limit *int, params map[string]interface{}, ) (o []ccxt.OHLCV, err error) { return o, x.Post(x.Path("fetchOHLCV"), &o, []interface{}{symbol, timeframe, since, limit, params}) } func (x *Exchange) FetchOrderBook( symbol string, limit *int, params map[string]interface{}, ) (o ccxt.OrderBook, err error) { return o, x.Post(x.Path("fetchOrderBook"), &o, []interface{}{symbol, limit, params}) } func (x *Exchange) FetchL2OrderBook( symbol string, limit *int, params map[string]interface{}, ) (o ccxt.OrderBook, err error) { return o, x.Post(x.Path("fetchL2OrderBook"), &o, []interface{}{symbol, limit, params}) } func (x *Exchange) FetchTrades( symbol string, since *util.JSONTime, params map[string]interface{}, ) (t []ccxt.Trade, err error) { return t, x.Post(x.Path("fetchTrades"), &t, []interface{}{symbol, since, params}) } func (x *Exchange) FetchOrder( id string, symbol *string, 
params map[string]interface{}, ) (o ccxt.Order, err error) { return o, x.Post(x.Path("fetchOrder"), &o, []interface{}{id, symbol, params}) } func (x *Exchange) FetchOrders( symbol *string, since *util.JSONTime, limit *int, params map[string]interface{}, ) (o []ccxt.Order, err error) { return o, x.Post(x.Path("fetchOrders"), &o, []interface{}{symbol, since, limit, params}) } func (x *Exchange) FetchOpenOrders( symbol *string, since *util.JSONTime, limit *int, params map[string]interface{}, ) (o []ccxt.Order, err error) { return o, x.Post(x.Path("fetchOpenOrders"), &o, []interface{}{symbol, since, limit, params}) } func (x *Exchange) FetchClosedOrders( symbol *string, since *util.JSONTime, limit *int, params map[string]interface{}, ) (o []ccxt.Order, err error) { return o, x.Post(x.Path("fetchClosedOrders"), &o, []interface{}{symbol, since, limit, params}) } func (x *Exchange) FetchMyTrades( symbol *string, since *util.JSONTime, limit *int, params map[string]interface{}, ) (t []ccxt.Trade, err error) { return t, x.Post(x.Path("fetchMyTrades"), &t, []interface{}{symbol, since, limit, params}) } func (x *Exchange) FetchBalance(params map[string]interface{}) (b ccxt.Balances, err error) { return b, x.Post(x.Path("fetchBalance"), &b, []interface{}{params}) } func (x *Exchange) CreateOrder( symbol, t, side string, amount float64, price *float64, params map[string]interface{}, ) (o ccxt.Order, err error) { return o, x.Post(x.Path("createOrder"), &o, []interface{}{symbol, t, side, amount, price, params}) } func (x *Exchange) CancelOrder( id string, symbol *string, params map[string]interface{}) error { return x.Post(x.Path("cancelOrder"), nil, []interface{}{id, symbol, params}) } func (x *Exchange) CreateLimitBuyOrder( symbol string, amount float64, price *float64, params map[string]interface{}, ) (ccxt.Order, error) { return x.CreateOrder(symbol, "limit", "buy", amount, price, params) } func (x *Exchange) CreateLimitSellOrder( symbol string, amount float64, price *float64, params map[string]interface{}, ) (ccxt.Order, error) { return x.CreateOrder(symbol, "limit", "sell", amount, price, params) } func (x *Exchange) CreateMarketBuyOrder( symbol string, amount float64, params map[string]interface{}, ) (ccxt.Order, error) { return x.CreateOrder(symbol, "market", "buy", amount, nil, params) } func (x *Exchange) CreateMarketSellOrder( symbol string, amount float64, params map[string]interface{}, ) (ccxt.Order, error) { return x.CreateOrder(symbol, "market", "sell", amount, nil, params) } func (x *Exchange) Path(endpoint string) string { return path.Join(endpointExchanges, x.Key, x.ID, endpoint) } func (x *Exchange) Post(endpoint string, dest interface{}, params interface{}) error { return x.Do(http.MethodPost, endpoint, dest, params) } func (x *Exchange) Get(endpoint string, dest interface{}) error { return x.Do(http.MethodGet, endpoint, dest, nil) } func (x *Exchange) Do(method, endpoint string, dest interface{}, params interface{}) error { if s := path.Base(endpoint); s != x.Key && !x.Has[s] { return ccxt.NotSupportedError(s) } u, err := url.Parse(x.BaseURL) if err != nil { return fmt.Errorf("couldn't parse BaseURL for %s: %s", x.ID, err) } u.Path = path.Join(u.Path, endpoint) var body io.Reader if params != nil { body = new(bytes.Buffer) enc := json.NewEncoder(body.(*bytes.Buffer)) err := enc.Encode(params) if err != nil { return fmt.Errorf("couldn't json encode params: %s", err) } } rq, err := http.NewRequest(method, u.String(), body) if err != nil { return fmt.Errorf("error making request: %s", 
err) } if *debug { s := fmt.Sprintf("%s %s", method, u.String()) if params != nil { // do not display api credentials, just in case if p, ok := params.(map[string]interface{}); ok { if apiKey, ok := p["apiKey"]; ok && apiKey != "" { p["apiKey"] = "*" } if secret, ok := p["secret"]; ok && secret != "" { p["secret"] = "*" } } pparams, _ := json.MarshalIndent(params, "", " ") s += "\n" + string(pparams) } log.Println(s) } resp, err := x.Client.Do(rq) if err != nil { return err } // read body respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return fmt.Errorf("error reading body: %s", err) } if *debug { s := fmt.Sprint(resp.StatusCode) if len(respBody) > 0 { var jsonMapResp map[string]interface{} err := json.Unmarshal(respBody, &jsonMapResp) if err == nil { // do not display api credentials, just in case if apiKey, ok := jsonMapResp["apiKey"]; ok && apiKey != "" { jsonMapResp["apiKey"] = "*" } if secret, ok := jsonMapResp["secret"]; ok && secret != ""
b, _ := json.MarshalIndent(jsonMapResp, "", " ") s += "\n" + string(b) } else { prettyBody := bytes.NewBuffer(nil) _ = json.Indent(prettyBody, respBody, "", " ") s += "\n" + string(prettyBody.Bytes()) } } else { s += " - empty body" } log.Println(s) } if resp.StatusCode > 299 || resp.StatusCode < 200 { // unpack ccxt-rest error message & type var ccxtErr = struct { Message string `json:"message"` Type string `json:"type"` }{} err = json.Unmarshal(respBody, &ccxtErr) if err == nil && ccxtErr.Message != "" { return ccxt.TypedError(ccxtErr.Type, ccxtErr.Message) } return fmt.Errorf("%s: %d - %s", u, resp.StatusCode, respBody) } if dest == nil { return nil } err = json.Unmarshal(respBody, dest) if err != nil { return fmt.Errorf("error decoding ccxt-rest response: %s\n", err) } return nil }
{ jsonMapResp["secret"] = "*" }
main.rs
use std::collections::HashMap; use std::fmt; use std::io; use std::io::prelude::*; use std::io::BufReader; #[derive(Debug)] struct Morpheme { surface: String, pos: String, pos1: String, pos2: String, pos3: String, conj_rule: String, conj_type: String, base: String, yomi: String, pron: String, } impl Morpheme { fn from(surface: &str) -> Morpheme { Morpheme { surface: String::from(surface), pos: String::from("*"), pos1: String::from("*"), pos2: String::from("*"), pos3: String::from("*"), conj_rule: String::from("*"), conj_type: String::from("*"), base: String::from("*"), yomi: String::from("*"), pron: String::from("*"), } } fn from_text(text: &str) -> Option<Morpheme> { let v: Vec<_> = text.split("\t").collect(); if v.len() < 2 { return None; } let mut m = Morpheme::from(&v[0]); let v: Vec<_> = v[1].split(",").collect(); if v.len() < 9 { return None; } m.pos = v[0].to_string(); m.pos1 = v[1].to_string(); m.pos2 = v[2].to_string(); m.pos3 = v[3].to_string(); m.conj_rule = v[4].to_string(); m.conj_type = v[5].to_string(); m.base = v[6].to_string(); m.yomi = v[7].to_string(); m.pron = v[8].to_string(); Some(m) } } impl fmt::Display for Morpheme { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "Morpheme({}-{},{},{},{},{},{},{},{},{})", self.surface, self.pos, self.pos1, self.pos2, self.pos3, self.conj_rule, self.conj_type, self.base, self.yomi, self.pron ) } } fn main() -> io::Result<()>
{ let f = BufReader::new(io::stdin()); let mut neko: Vec<Vec<Morpheme>> = Vec::new(); let mut sentence: Vec<Morpheme> = Vec::new(); for line in f.lines() { let line = line?; if line.starts_with("EOS") { if sentence.len() > 0 { neko.push(sentence); sentence = Vec::new(); } } else { if let Some(m) = Morpheme::from_text(&line) { sentence.push(m); } } } if sentence.len() > 0 { neko.push(sentence); } let mut map: HashMap<String, usize> = HashMap::new(); for m in neko.iter().flat_map(|x| x.iter()) { *map.entry(m.surface.clone()).or_insert(0) += 1; } let mut v: Vec<_> = map.iter().collect(); v.sort_by(|a, b| b.1.cmp(a.1)); for (k, v) in v { println!("{}\t{}", v, k); } Ok(()) }
info.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmd import ( "bytes" "context" "encoding/json" "flag" "fmt" "net/url" "os" "strings" "golang.org/x/tools/internal/lsp/browser" "golang.org/x/tools/internal/lsp/debug" "golang.org/x/tools/internal/lsp/source" )
} func (v *version) Name() string { return "version" } func (v *version) Usage() string { return "" } func (v *version) ShortHelp() string { return "print the gopls version information" } func (v *version) DetailedHelp(f *flag.FlagSet) { fmt.Fprint(f.Output(), ``) f.PrintDefaults() } // Run prints version information to stdout. func (v *version) Run(ctx context.Context, args ...string) error { debug.PrintVersionInfo(ctx, os.Stdout, v.app.verbose(), debug.PlainText) return nil } // bug implements the bug command. type bug struct{} func (b *bug) Name() string { return "bug" } func (b *bug) Usage() string { return "" } func (b *bug) ShortHelp() string { return "report a bug in gopls" } func (b *bug) DetailedHelp(f *flag.FlagSet) { fmt.Fprint(f.Output(), ``) f.PrintDefaults() } const goplsBugPrefix = "x/tools/gopls: <DESCRIBE THE PROBLEM>" const goplsBugHeader = `ATTENTION: Please answer these questions BEFORE submitting your issue. Thanks! #### What did you do? If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on play.golang.org is better. A failing unit test is the best. #### What did you expect to see? #### What did you see instead? ` // Run collects some basic information and then prepares an issue ready to // be reported. func (b *bug) Run(ctx context.Context, args ...string) error { buf := &bytes.Buffer{} fmt.Fprint(buf, goplsBugHeader) debug.PrintVersionInfo(ctx, buf, true, debug.Markdown) body := buf.String() title := strings.Join(args, " ") if !strings.HasPrefix(title, goplsBugPrefix) { title = goplsBugPrefix + title } if !browser.Open("https://github.com/golang/go/issues/new?title=" + url.QueryEscape(title) + "&body=" + url.QueryEscape(body)) { fmt.Print("Please file a new issue at golang.org/issue/new using this template:\n\n") fmt.Print(body) } return nil } type apiJSON struct{} func (sj *apiJSON) Name() string { return "api-json" } func (sj *apiJSON) Usage() string { return "" } func (sj *apiJSON) ShortHelp() string { return "print json describing gopls API" } func (sj *apiJSON) DetailedHelp(f *flag.FlagSet) { fmt.Fprint(f.Output(), ``) f.PrintDefaults() } func (sj *apiJSON) Run(ctx context.Context, args ...string) error { js, err := json.MarshalIndent(source.GeneratedAPIJSON, "", "\t") if err != nil { return err } fmt.Fprint(os.Stdout, string(js)) return nil }
// version implements the version command. type version struct { app *Application
to-hex.ts
/** * Convert a number to a UTF-8 string by decoding the bytes of its hexadecimal representation * @param {number} value * @return {string} */ export function
(value: number): string { let hex = value.toString(16); if ((hex.length % 2) > 0) { hex = '0' + hex; } return Buffer .from(hex, 'hex') .toString('utf-8'); }
toHex
helper_test.go
package git import ( "testing" "github.com/stretchr/testify/require" "gitlab.com/gitlab-org/gitaly/v14/internal/testhelper" ) func TestMain(m *testing.M) { testhelper.Run(m) } func TestValidateRevision(t *testing.T) { testCases := []struct { rev string ok bool }{ {rev: "foo/bar", ok: true}, {rev: "-foo/bar", ok: false}, {rev: "foo bar", ok: false}, {rev: "foo\x00bar", ok: false}, {rev: "foo/bar:baz", ok: false}, }
require.NoError(t, err) } else { require.Error(t, err) } }) } }
for _, tc := range testCases { t.Run(tc.rev, func(t *testing.T) { err := ValidateRevision([]byte(tc.rev)) if tc.ok {
analytics.js
function
() { //calculate 12 months random users //calculate 12 levels of customers var startNum = Math.floor( Math.random() * 5000) + 5000; var visitors = new Array(12); var customers = new Array(); var magicX = .5; visitors[0] = startNum; customers[0] = Math.floor(visitors[0] * magicX); for(var i = 1; i < 12; i++){ if(Math.random() > .5){ visitors[i] = visitors[i-1] + Math.floor( Math.random() * 200 ); }else{ visitors[i] = visitors[i-1] - Math.floor( Math.random() * 200 ); }//end if up or down customers[i] = Math.floor(visitors[i] * magicX); }//end for 12 months $('#analytics_chart').highcharts({ chart: { type: 'line', backgroundColor: "rgba(255, 255, 255, 0.02)" }, title: { text: 'Random Visitors and Customers', x: -20 //center }, subtitle: { text: 'Source: Math.random', x: -20 }, xAxis: { categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] }, yAxis: { title: { text: '# of Users' }, plotLines: [{ value: 0, width: 1, color: '#808080' }] }, legend: { layout: 'vertical', align: 'right', verticalAlign: 'middle', borderWidth: 0 }, series: [{ name: 'Visitors', data: visitors }, { name: 'Customers', data: customers }] }); };
analyticsChart
tasks.controller.ts
import { Controller, Get, Post, Put, Delete, Body, Param, Req, Res } from '@nestjs/common'; import { CreateTaskDto } from './dto/create-task.dto'; import { Task } from './interfaces/Task'; import { TasksService } from './tasks.service'; @Controller('tasks') export class
{ constructor(private taskService: TasksService) {} @Get() // Fetch data getTasks(): Promise<Task[]> { return this.taskService.getTasks(); } @Get(':taskId') getTask(@Param('taskId') taskId: string) { return this.taskService.getTask(taskId); } @Post() // Send data to be stored createTask(@Body() task: CreateTaskDto): string { console.log(task.title, task.description, task.done); return 'Creating a task'; } @Delete(':id') deleteTask(@Param('id') id): string { console.log(id); return `Deleting a task number: ${id}`; } @Put(':id') updateTask(@Body() task: CreateTaskDto, @Param('id') id): string { console.log(task); console.log(id); return 'Updating a task'; } // getTasks(@Req() req, @Res() res): Response { // return res.send("Hello world"); // } }
TasksController
test_website_group.py
# coding: utf-8 """ LogicMonitor REST API LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import logicmonitor_sdk from logicmonitor_sdk.models.website_group import WebsiteGroup # noqa: E501 from logicmonitor_sdk.rest import ApiException class TestWebsiteGroup(unittest.TestCase): """WebsiteGroup unit test stubs""" def setUp(self): pass def tearDown(self): pass def testWebsiteGroup(self):
if __name__ == '__main__': unittest.main()
"""Test WebsiteGroup""" # FIXME: construct object with mandatory attributes with example values # model = logicmonitor_sdk.models.website_group.WebsiteGroup() # noqa: E501 pass
codesigndoc.go
package codesigndocuitests import ( "errors" "fmt" "github.com/bitrise-io/codesigndoc/codesign" "github.com/bitrise-io/go-utils/log" "github.com/bitrise-io/go-xcode/certificateutil" "github.com/bitrise-io/go-xcode/export" "github.com/bitrise-io/go-xcode/profileutil" ) const collectCodesigningFilesInfo = `To collect available code sign files, we search for installed Provisioning Profiles:" - which has installed Codesign Identity in your Keychain" - which can provision your application target's bundle ids" - which has the project defined Capabilities set" ` // CollectCodesignFiles collects the codesigning files for the UITests-Runner.app // and filters them for the specified export method. func CollectCodesignFiles(buildPath string, certificatesOnly bool) ([]certificateutil.CertificateInfoModel, []profileutil.ProvisioningProfileInfoModel, error) { // Find out the XcArchive type certificateType := codesign.IOSCertificate profileType := profileutil.ProfileTypeIos // Certificates certificates, err := codesign.InstalledCertificates(certificateType) if err != nil { return nil, nil, fmt.Errorf("failed to list installed code signing identities, error: %s", err) } log.Debugf("Installed certificates:") for _, installedCertificate := range certificates { log.Debugf(installedCertificate.String()) } // Profiles profiles, err := profileutil.InstalledProvisioningProfileInfos(profileType) if err != nil { return nil, nil, fmt.Errorf("failed to list installed provisioning profiles, error: %s", err) } log.Debugf("Installed profiles:") for _, profileInfo := range profiles { log.Debugf(profileInfo.String(certificates...)) } return getFilesToExport(buildPath, certificates, profiles, certificatesOnly) } func getFilesToExport(buildPath string, installedCertificates []certificateutil.CertificateInfoModel, installedProfiles []profileutil.ProvisioningProfileInfoModel, certificatesOnly bool) ([]certificateutil.CertificateInfoModel, []profileutil.ProvisioningProfileInfoModel, error) { var certificatesToExport []certificateutil.CertificateInfoModel var profilesToExport []profileutil.ProvisioningProfileInfoModel if certificatesOnly { exportCertificate, err := collectExportCertificate(installedCertificates) if err != nil { return nil, nil, err } certificatesToExport = append(certificatesToExport, exportCertificate...) } else { testRunners, err := NewIOSTestRunners(buildPath) if err != nil { return nil, nil, err } for _, testRunner := range testRunners { certsToExport, profsToExport, err := collectCertificatesAndProfiles(*testRunner, installedCertificates, installedProfiles) if err != nil { return nil, nil, err } certificatesToExport = append(certificatesToExport, certsToExport...) profilesToExport = append(profilesToExport, profsToExport...) } } return certificatesToExport, profilesToExport, nil } func
(testRunner IOSTestRunner, installedCertificates []certificateutil.CertificateInfoModel, installedProfiles []profileutil.ProvisioningProfileInfoModel) ([]certificateutil.CertificateInfoModel, []profileutil.ProvisioningProfileInfoModel, error) { groups, err := collectExportCodeSignGroups(testRunner, installedCertificates, installedProfiles) if err != nil { return nil, nil, err } var exportCodeSignGroups []export.CodeSignGroup for _, group := range groups { exportCodeSignGroup, ok := group.(*export.IosCodeSignGroup) if ok { exportCodeSignGroups = append(exportCodeSignGroups, exportCodeSignGroup) } } if len(exportCodeSignGroups) == 0 { return nil, nil, errors.New("no export code sign groups collected") } certificates, profiles := extractCertificatesAndProfiles(exportCodeSignGroups...) return certificates, profiles, nil }
collectCertificatesAndProfiles
library.rs
use std::{ borrow::Cow, collections::HashSet, env, ffi::OsStr, fmt, iter, path::{Path, PathBuf}, }; use dunce::canonicalize; use glob::glob; use semver::Version; use super::{ cleanup_lib_filename, cmake_probe::CmakeProbe, get_version_from_headers, MANIFEST_DIR, OUT_DIR, Result, }; struct PackageName; impl PackageName { pub fn env() -> Option<Cow<'static, str>> { env::var("OPENCV_PACKAGE_NAME") .ok() .map(|x| x.into()) } pub fn env_pkg_config() -> Option<Cow<'static, str>> { env::var("OPENCV_PKGCONFIG_NAME") .ok() .map(|x| x.into()) } pub fn env_cmake() -> Option<Cow<'static, str>> { env::var("OPENCV_CMAKE_NAME") .ok() .map(|x| x.into()) } pub fn env_vcpkg() -> Option<Cow<'static, str>> { env::var("OPENCV_VCPKG_NAME") .ok() .map(|x| x.into()) } pub fn pkg_config() -> Vec<Cow<'static, str>> { if let Some(env_name) = Self::env().or_else(Self::env_pkg_config) { vec![env_name] } else { vec!["opencv4".into(), "opencv".into()] } } pub fn cmake() -> Cow<'static, str> { Self::env() .or_else(Self::env_cmake) .unwrap_or_else(|| "OpenCV".into()) } pub fn vcpkg() -> Vec<Cow<'static, str>> { if let Some(env_name) = Self::env().or_else(Self::env_vcpkg) { vec![env_name] } else { vec!["opencv4".into(), "opencv3".into()] } } } #[derive(Clone, Copy, Debug)] pub struct EnvList<'s> { src: &'s str, } impl<'s> EnvList<'s> { pub fn is_extend(&self) -> bool { self.src.starts_with('+') } pub fn iter(&self) -> impl Iterator<Item=&'s str> { if self.is_extend() { &self.src[1..] } else { self.src }.split(',') } } impl<'s> From<&'s str> for EnvList<'s> { fn from(src: &'s str) -> Self { Self { src } } } impl fmt::Display for EnvList<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self.src, f) } } #[derive(Debug)] pub struct Library { pub include_paths: Vec<PathBuf>, pub version: Version, pub cargo_metadata: Vec<String>, } impl Library { fn process_library_list(libs: impl IntoIterator<Item=impl Into<PathBuf>>) -> impl Iterator<Item=String> { libs.into_iter() .filter_map(|x| { let mut path: PathBuf = x.into(); let is_framework = path.extension() .and_then(OsStr::to_str) .map_or(false, |e| e.eq_ignore_ascii_case("framework")); if let Some(filename) = path.file_name().and_then(cleanup_lib_filename) { let filename = filename.to_owned(); path.set_file_name(filename); } path.file_name() .and_then(|f| f.to_str() .map(|f| if is_framework { format!("framework={}", f) } else { f.to_owned() }) ) }) } fn version_from_include_paths(include_paths: impl IntoIterator<Item=impl AsRef<Path>>) -> Option<Version> { include_paths.into_iter().find_map(|x| get_version_from_headers(x.as_ref())) } #[inline] fn emit_link_search(path: &Path, typ: Option<&str>) -> String { format!( "cargo:rustc-link-search={}{}", typ.map_or_else(|| "".to_string(), |t| format!("{}=", t)), path.to_str().expect("Can't convert link search path to UTF-8 string") ) } #[inline] fn emit_link_lib(lib: &str, typ: Option<&str>) -> String { format!("cargo:rustc-link-lib={}{}", typ.map_or_else(|| "".to_string(), |t| format!("{}=", t)), lib) } fn process_env_var_list<'a, T: From<&'a str>>(env_list: Option<EnvList<'a>>, sys_list: Vec<T>) -> Vec<T> { if let Some(include_paths) = env_list { let mut includes = if include_paths.is_extend() { sys_list } else { vec![] }; includes.extend(include_paths.iter().filter(|v| !v.is_empty()).map(T::from)); includes } else { sys_list } } fn process_link_paths<'a>(link_paths: Option<EnvList>, sys_link_paths: Vec<PathBuf>, typ: Option<&'a str>) -> impl Iterator<Item=String> + 'a { 
Self::process_env_var_list(link_paths, sys_link_paths).into_iter() .map(move |path| { let out = iter::once(Self::emit_link_search(&path, typ)); #[cfg(target_os = "macos")] { out.chain( iter::once(Self::emit_link_search(&path, Some("framework"))) ) } #[cfg(not(target_os = "macos"))] { out } }) .flatten() } fn process_link_libs<'a>(link_libs: Option<EnvList>, sys_link_libs: Vec<String>, typ: Option<&'a str>) -> impl Iterator<Item=String> + 'a { Self::process_library_list(Self::process_env_var_list(link_libs, sys_link_libs).into_iter()) .map(move |l| Self::emit_link_lib(&l, typ)) } pub fn probe_from_paths(include_paths: Option<EnvList>, link_paths: Option<EnvList>, link_libs: Option<EnvList>) -> Result<Self> { if let (Some(include_paths), Some(link_paths), Some(link_libs)) = (include_paths, link_paths, link_libs) { if include_paths.is_extend() || link_paths.is_extend() || link_libs.is_extend() { return Err("Some environment variables extend the system default paths (i.e. start with '+')".into()); } eprintln!("=== Configuring OpenCV library from the environment:"); eprintln!("=== include_paths: {}", include_paths); eprintln!("=== link_paths: {}", link_paths); eprintln!("=== link_libs: {}", link_libs); let mut cargo_metadata = Vec::with_capacity(64); let include_paths: Vec<_> = include_paths.iter() .map(PathBuf::from) .collect(); let version = Self::version_from_include_paths(&include_paths); cargo_metadata.extend(Self::process_link_paths(Some(link_paths), vec![], None)); cargo_metadata.extend(Self::process_link_libs(Some(link_libs), vec![], None)); Ok(Self { include_paths, version: version.unwrap_or_else(|| Version::new(0, 0, 0)), cargo_metadata, }) } else { Err("Some environment variables are missing".into()) } } pub fn probe_pkg_config(include_paths: Option<EnvList>, link_paths: Option<EnvList>, link_libs: Option<EnvList>) -> Result<Self> { eprintln!("=== Probing OpenCV library using pkg_config"); let mut config = pkg_config::Config::new(); config.cargo_metadata(false); let mut errors = vec![]; let mut opencv = None; let possible_opencvs = PackageName::pkg_config().into_iter() .map(|pkg_name| config.probe(&pkg_name)); for possible_opencv in possible_opencvs { match possible_opencv { Ok(possible_opencv) => { opencv = Some(possible_opencv); break; } Err(e) => { errors.push(e.to_string()); } } } let opencv = opencv.ok_or_else(|| errors.join(", "))?; let mut cargo_metadata = Vec::with_capacity(64); cargo_metadata.extend(Self::process_link_paths(link_paths, opencv.link_paths, None)); if link_paths.map_or(true, |link_paths| link_paths.is_extend()) { cargo_metadata.extend(Self::process_link_paths(None, opencv.framework_paths, Some("framework"))); } cargo_metadata.extend(Self::process_link_libs(link_libs, opencv.libs, None)); if link_libs.map_or(false, |link_libs| link_libs.is_extend()) { cargo_metadata.extend(Self::process_link_libs(None, opencv.frameworks, Some("framework"))); } let include_paths = Self::process_env_var_list(include_paths, opencv.include_paths); Ok(Self { include_paths, version: Version::parse(&opencv.version)?, cargo_metadata, }) } pub fn probe_cmake(include_paths: Option<EnvList>, link_paths: Option<EnvList>, link_libs: Option<EnvList>, toolchain: Option<&Path>, cmake_bin: Option<&Path>, ninja_bin: Option<&Path>) -> Result<Self> { eprintln!( "=== Probing OpenCV library using cmake{}", toolchain.map(|tc| format!(" with toolchain: {}", tc.display())).unwrap_or_else(|| "".to_string()) ); let src_dir = MANIFEST_DIR.join("cmake"); let package_name = PackageName::cmake(); let cmake = 
CmakeProbe::new( env::var_os("OPENCV_CMAKE_BIN") .map(PathBuf::from) .or_else(|| cmake_bin.map(PathBuf::from)), &OUT_DIR, &src_dir, package_name.as_ref(), toolchain, env::var_os("PROFILE").map_or(false, |p| p == "release"), ); let mut probe_result = cmake.probe_ninja(ninja_bin) .or_else(|e| { eprintln!("=== Probing with cmake ninja generator failed, will try makefile generator, error: {}", e); cmake.probe_makefile() }) .or_else(|e| { eprintln!("=== Probing with cmake Makefile generator failed, will try deprecated find_package, error: {}", e); cmake.probe_find_package() })?; if probe_result.version.is_none() { probe_result.version = Self::version_from_include_paths(&probe_result.include_paths); } let mut cargo_metadata = Vec::with_capacity(probe_result.link_paths.len() + probe_result.link_libs.len()); cargo_metadata.extend(Self::process_link_paths(link_paths, probe_result.link_paths, None)); cargo_metadata.extend(Self::process_link_libs(link_libs, probe_result.link_libs, None)); Ok(Self { include_paths: Self::process_env_var_list(include_paths, probe_result.include_paths), version: probe_result.version.unwrap_or_else(|| Version::new(0, 0, 0)), cargo_metadata, }) } pub fn probe_vcpkg(include_paths: Option<EnvList>, link_paths: Option<EnvList>, link_libs: Option<EnvList>) -> Result<Self>
pub fn probe_vcpkg_cmake(include_paths: Option<EnvList>, link_paths: Option<EnvList>, link_libs: Option<EnvList>) -> Result<Self> { eprintln!("=== Probing OpenCV library using vcpkg_cmake"); let mut config = vcpkg::Config::new(); config.cargo_metadata(false); // don't care about the error here, only need to have dlls copied to outdir PackageName::vcpkg().into_iter() .map(|pkg_name| config.find_package(&pkg_name)) .take_while(|r| r.is_err()) .count(); let vcpkg_root = canonicalize(vcpkg::find_vcpkg_root(&config)?)?; eprintln!("=== Discovered vcpkg root: {}", vcpkg_root.display()); let vcpkg_cmake = vcpkg_root.to_str() .and_then(|vcpkg_root| { glob(&format!("{}/downloads/tools/cmake*/*/bin/cmake", vcpkg_root)).ok() .and_then(|cmake_iter| glob(&format!("{}/downloads/tools/cmake*/*/bin/cmake.exe", vcpkg_root)).ok() .map(|cmake_exe_iter| cmake_iter.chain(cmake_exe_iter)) ) }) .and_then(|paths| paths.filter_map(|r| r.ok()) .filter_map(|p| canonicalize(p).ok()) .find(|p| p.is_file()) ); let vcpkg_ninja = vcpkg_root.to_str() .and_then(|vcpkg_root| { glob(&format!("{}/downloads/tools/ninja*/ninja", vcpkg_root)).ok() .and_then(|ninja_iter| glob(&format!("{}/downloads/tools/ninja*/*/ninja.exe", vcpkg_root)).ok() .map(|ninja_exe_iter| ninja_iter.chain(ninja_exe_iter)) ) }) .and_then(|paths| paths.filter_map(|r| r.ok()) .filter_map(|p| canonicalize(p).ok()) .find(|p| p.is_file()) ); let toolchain = vcpkg_root.join("scripts/buildsystems/vcpkg.cmake"); Self::probe_cmake(include_paths, link_paths, link_libs, Some(&toolchain), vcpkg_cmake.as_deref(), vcpkg_ninja.as_deref()) } pub fn probe_system(include_paths: Option<EnvList>, link_paths: Option<EnvList>, link_libs: Option<EnvList>) -> Result<Self> { let probe_paths = || Self::probe_from_paths(include_paths, link_paths, link_libs); let probe_pkg_config = || Self::probe_pkg_config(include_paths, link_paths, link_libs); let probe_cmake = || Self::probe_cmake(include_paths, link_paths, link_libs, None, None, None); let probe_vcpkg_cmake = || Self::probe_vcpkg_cmake(include_paths, link_paths, link_libs); let probe_vcpkg = || Self::probe_vcpkg(include_paths, link_paths, link_libs); let explicit_pkg_config = env::var_os("PKG_CONFIG_PATH").is_some() || env::var_os("OPENCV_PKGCONFIG_NAME").is_some(); let explicit_cmake = env::var_os("OpenCV_DIR").is_some() || env::var_os("OPENCV_CMAKE_NAME").is_some() || env::var_os("CMAKE_PREFIX_PATH").is_some() || env::var_os("OPENCV_CMAKE_BIN").is_some(); let explicit_vcpkg = env::var_os("VCPKG_ROOT").is_some() || cfg!(target_os = "windows"); eprintln!("=== Detected probe priority based on environment vars: pkg_config: {}, cmake: {}, vcpkg: {}", explicit_pkg_config, explicit_cmake, explicit_vcpkg ); let disabled_probes = env::var("OPENCV_DISABLE_PROBES"); let disabled_probes = disabled_probes.as_ref() .map(|s| EnvList::from(s.as_str()).iter().collect()) .unwrap_or_else(|_| HashSet::new()); let mut probes = [ ("environment", &probe_paths as &dyn Fn() -> Result<Self>), ("pkg_config", &probe_pkg_config), ("cmake", &probe_cmake), ("vcpkg_cmake", &probe_vcpkg_cmake), ("vcpkg", &probe_vcpkg), ]; let mut prioritize = |probe: &str, over: &str| { let (probe_idx, over_idx) = probes.iter().position(|(name, _)| name == &probe) .and_then(|probe_idx| probes.iter().position(|(name, _)| name == &over) .map(|over_idx| (probe_idx, over_idx)) ) .expect("Can't find probe to swap"); if probe_idx > over_idx { for i in (over_idx..probe_idx).rev() { probes.swap(i, i + 1); } } }; if explicit_pkg_config { if explicit_vcpkg && !explicit_cmake { 
prioritize("vcpkg_cmake", "cmake"); prioritize("vcpkg", "cmake"); } } else if explicit_cmake { prioritize("cmake", "pkg_config"); if explicit_vcpkg { prioritize("vcpkg_cmake", "pkg_config"); prioritize("vcpkg", "pkg_config"); } } else if explicit_vcpkg { prioritize("vcpkg_cmake", "pkg_config"); prioritize("vcpkg", "pkg_config"); } let probe_list = probes.iter() .map(|(name, _)| *name) .collect::<Vec<_>>() .join(", "); eprintln!("=== Probing the OpenCV library in the following order: {}", probe_list); let mut out = None; for &(name, probe) in &probes { if !disabled_probes.contains(name) { match probe() { Ok(lib) => { out = Some(lib); break; } Err(e) => { eprintln!("=== Can't probe using: {}, continuing with other methods because: {}", name, e); } } } else { eprintln!("=== Skipping probe: {} because of the environment configuration", name); } } out.ok_or_else(|| { let methods = probes.iter() .map(|&(name, _)| name) .filter(|&name| !disabled_probes.contains(name)) .collect::<Vec<_>>() .join(", "); format!("Failed to find OpenCV package using probes: {}", methods).into() }) } pub fn probe() -> Result<Self> { let include_paths = env::var("OPENCV_INCLUDE_PATHS").ok(); let include_paths = include_paths.as_deref().map(EnvList::from); let link_paths = env::var("OPENCV_LINK_PATHS").ok(); let link_paths = link_paths.as_deref().map(EnvList::from); let link_libs = env::var("OPENCV_LINK_LIBS").ok(); let link_libs = link_libs.as_deref().map(EnvList::from); Self::probe_system(include_paths, link_paths, link_libs) } pub fn emit_cargo_metadata(&self) { self.cargo_metadata.iter().for_each(|meta| { println!("{}", meta); }); } }
{ eprintln!("=== Probing OpenCV library using vcpkg"); let mut config = vcpkg::Config::new(); config.cargo_metadata(false); let mut errors = vec![]; let mut opencv = None; let possible_opencvs = PackageName::vcpkg().into_iter() .map(|pkg_name| config.find_package(&pkg_name)); for possible_opencv in possible_opencvs { match possible_opencv { Ok(possible_opencv) => { opencv = Some(possible_opencv); break; } Err(e) => { errors.push(e.to_string()); } } } let opencv = opencv.ok_or_else(|| errors.join(", "))?; let version = Self::version_from_include_paths(&opencv.include_paths); let include_paths = Self::process_env_var_list(include_paths, opencv.include_paths); let mut cargo_metadata = opencv.cargo_metadata; if link_paths.as_ref().map_or(false, |lp| !lp.is_extend()) { cargo_metadata = cargo_metadata.into_iter() .filter(|p| !p.starts_with("cargo:rustc-link-search=")) .collect(); } cargo_metadata.extend(Self::process_link_paths(link_paths, vec![], None)); if link_libs.as_ref().map_or(false, |ll| !ll.is_extend()) { cargo_metadata = cargo_metadata.into_iter() .filter(|p| !p.starts_with("cargo:rustc-link-lib=")) .collect(); } cargo_metadata.extend(Self::process_link_libs(link_libs, vec![], None)); Ok(Self { include_paths, version: version.unwrap_or_else(|| Version::new(0, 0, 0)), cargo_metadata, }) }
sign-releases.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Sign releases on github, make/upload ppa to launchpad.net NOTE on ppa: To build a ppa you may need to install some more packages. On Ubuntu: sudo apt-get install devscripts libssl-dev python3-dev \ debhelper python3-setuptools dh-python NOTE on apk signing: To create a keystore and sign the apk you need to install java-8-openjdk, or java-7-openjdk on older systems. To create a keystore run the following command: mkdir ~/.jks && keytool -genkey -v -keystore ~/.jks/keystore \ -alias electrum.zcash.org -keyalg RSA -keysize 2048 \ -validity 10000 Then it shows a warning about the proprietary format and a command to migrate: keytool -importkeystore -srckeystore ~/.jks/keystore \ -destkeystore ~/.jks/keystore -deststoretype pkcs12 Manual signing: jarsigner -verbose \ -tsa http://sha256timestamp.ws.symantec.com/sha256/timestamp \ -sigalg SHA1withRSA -digestalg SHA1 \ -sigfile zcash-electrum \ -keystore ~/.jks/keystore \ Electrum_ZCASH-3.0.6.1-release-unsigned.apk \ electrum.zcash.org Zipalign from Android SDK build tools is also required (set path to bin in settings file or with key -z). To install: wget http://dl.google.com/android/android-sdk_r24-linux.tgz \ && tar xzf android-sdk_r24-linux.tgz \ && rm android-sdk_r24-linux.tgz \ && (while sleep 3; do echo "y"; done) \ | android-sdk-linux/tools/android update sdk -u -a -t \ 'tools, platform-tools-preview, build-tools-23.0.1' \ && (while sleep 3; do echo "y"; done) \ | android-sdk-linux/tools/android update sdk -u -a -t \ 'tools, platform-tools, build-tools-27.0.3' Manual zip aligning: android-sdk-linux/build-tools/27.0.3/zipalign -v 4 \ Electrum_ZCASH-3.0.6.1-release-unsigned.apk \ Zcash-Electrum-3.0.6.1-release.apk About script settings: Settings are read from options first, then from the config file. If a setting is already set from options, its value does not change. The config file can have a one-repo form or a multiple-repo form. In the one-repo form, settings are read from the root JSON object. Keys are "repo", "keyid", "token", "count", "sign_drafts", and others, which correspond to the program options. Example: { "repo": "value" ... } In the multiple-repo form, if the root "default_repo" key is set, the code reads the "repos" key as a list and cycles through it to find a suitable repo; if no repo is set before, "default_repo" is used to match. If a match is found, that list object is used as a one-repo form config. Example: { "default_repo": "value" "repos": [ { "repo": "value" ...
} ] } """ import os import os.path import re import sys import time import getpass import shutil import hashlib import tempfile import json import zipfile from subprocess import check_call, CalledProcessError from functools import cmp_to_key from time import localtime, strftime try: import click import certifi import gnupg import dateutil.parser import colorama from colorama import Fore, Style from github_release import (get_releases, gh_asset_download, gh_asset_upload, gh_asset_delete, gh_release_edit) from urllib3 import PoolManager except ImportError as e: print('Import error:', e) print('To run script install required packages with the next command:\n\n' 'pip install githubrelease python-gnupg pyOpenSSL cryptography idna' ' certifi python-dateutil click colorama requests LinkHeader') sys.exit(1) HTTP = PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) FNULL = open(os.devnull, 'w') HOME_DIR = os.path.expanduser('~') CONFIG_NAME = '.sign-releases' SEARCH_COUNT = 1 SHA_FNAME = 'SHA256SUMS.txt' # make_ppa related definitions PPA_SERIES = { 'xenial': '16.04.1', 'bionic': '18.04.1', 'focal': '20.04.1', 'groovy': '20.10.1', 'hirsute': '21.04.1', } PEP440_PUBVER_PATTERN = re.compile('^((\d+)!)?' '((\d+)(\.\d+)*)' '([a-zA-Z]+\d+)?' '((\.[a-zA-Z]+\d+)*)$') REL_NOTES_PATTERN = re.compile('^#.+?(^[^#].+?)^#.+?', re.M | re.S) SDIST_NAME_PATTERN = re.compile('^Zcash-Electrum-(.*).tar.gz$') SDIST_DIR_TEMPLATE = 'Zcash-Electrum-{version}' PPA_SOURCE_NAME = 'electrum-zcash' PPA_ORIG_NAME_TEMPLATE = '%s_{version}.orig.tar.gz' % PPA_SOURCE_NAME CHANGELOG_TEMPLATE = """%s ({ppa_version}) {series}; urgency=medium {changes} -- {uid} {time}""" % PPA_SOURCE_NAME PPA_FILES_TEMPLATE = '%s_{0}{1}' % PPA_SOURCE_NAME LP_API_URL='https://api.launchpad.net/1.0' LP_SERIES_TEMPLATE = '%s/ubuntu/{0}' % LP_API_URL LP_ARCHIVES_TEMPLATE = '%s/~{user}/+archive/ubuntu/{ppa}' % LP_API_URL # sing_apk related definitions JKS_KEYSTORE = os.path.join(HOME_DIR, '.jks/keystore') JKS_ALIAS = 'electrum.dash.org' JKS_STOREPASS = 'JKS_STOREPASS' JKS_KEYPASS = 'JKS_KEYPASS' KEYTOOL_ARGS = ['keytool', '-list', '-storepass:env', JKS_STOREPASS] JARSIGNER_ARGS = [ 'jarsigner', '-verbose', '-tsa', 'http://sha256timestamp.ws.symantec.com/sha256/timestamp', '-sigalg', 'SHA1withRSA', '-digestalg', 'SHA1', '-sigfile', 'zcash-electrum', '-storepass:env', JKS_STOREPASS, '-keypass:env', JKS_KEYPASS, ] UNSIGNED_APK_PATTERN = re.compile('^Electrum_ZCASH(_Testnet)?-(.*)-release-unsigned.apk$') SIGNED_APK_TEMPLATE = 'Zcash-Electrum{testnet}-{version}-release.apk' os.environ['QUILT_PATCHES'] = 'debian/patches' def pep440_to_deb(version): """Convert PEP 440 public version to deb upstream version""" ver_match = PEP440_PUBVER_PATTERN.match(version) if not ver_match: raise Exception('Version "%s" does not comply with PEP 440' % version) g = ver_match.group deb_ver = '' deb_ver += ('%s:' % g(2)) if g(1) else '' deb_ver += g(3) deb_ver += ('~%s' % g(6)) if g(6) else '' deb_ver += ('%s' % g(7)) if g(7) else '' return deb_ver def compare_published_times(a, b): """Releases list sorting comparsion function (last published first)""" a = a['published_at'] b = b['published_at'] if not a and not b: return 0 elif not a: return -1 elif not b: return 1 a = dateutil.parser.parse(a) b = dateutil.parser.parse(b) if a > b: return -1 elif b > a: return 1 else: return 0 def sha256_checksum(filename, block_size=65536): """Gather sha256 hash on filename""" sha256 = hashlib.sha256() with open(filename, 'rb') as f: for block in iter(lambda: f.read(block_size), 
b''): sha256.update(block) return sha256.hexdigest() def read_config(): """Read and parse JSON from config file from HOME dir""" config_path = os.path.join(HOME_DIR, CONFIG_NAME) if not os.path.isfile(config_path): return {} try: with open(config_path, 'r') as f: data = f.read() return json.loads(data) except Exception as e: print('Error: Cannot read config file:', e) return {}
def get_next_ppa_num(ppa, source_package_name, ppa_upstr_version, series_name): """Calculate next ppa num (if older ppa versions whas published earlier)""" user, ppa_name = ppa.split('/') archives_url = LP_ARCHIVES_TEMPLATE.format(user=user, ppa=ppa_name) series_url = LP_SERIES_TEMPLATE.format(series_name) query = { 'ws.op': 'getPublishedSources', 'distro_series': series_url, 'order_by_date': 'true', 'source_name': source_package_name, } resp = HTTP.request('GET', archives_url, fields=query) if resp.status != 200: raise Exception('Launchpad API error %s %s', (resp.status, resp.reason)) data = json.loads(resp.data.decode('utf-8')) entries = data['entries'] if len(entries) == 0: return 1 for e in entries: ppa_version = e['source_package_version'] version_match = re.match('%s-0ppa(\d+)~ubuntu' % ppa_upstr_version, ppa_version) if version_match: return int(version_match.group(1)) + 1 return 1 class ChdirTemporaryDirectory(object): """Create tmp dir, chdir to it and remove on exit""" def __enter__(self): self.prev_wd = os.getcwd() self.name = tempfile.mkdtemp() os.chdir(self.name) return self.name def __exit__(self, exc_type, exc_value, traceback): os.chdir(self.prev_wd) shutil.rmtree(self.name) class SignApp(object): def __init__(self, **kwargs): """Get app settings from options, from curdir git, from config file""" ask_passphrase = kwargs.pop('ask_passphrase', None) self.sign_drafts = kwargs.pop('sign_drafts', False) self.force = kwargs.pop('force', False) self.tag_name = kwargs.pop('tag_name', None) self.repo = kwargs.pop('repo', None) self.ppa = kwargs.pop('ppa', None) self.ppa_upstream_suffix = kwargs.pop('ppa_upstream_suffix', None) self.token = kwargs.pop('token', None) self.keyid = kwargs.pop('keyid', None) self.count = kwargs.pop('count', None) self.dry_run = kwargs.pop('dry_run', False) self.no_ppa = kwargs.pop('no_ppa', False) self.only_ppa = kwargs.pop('only_ppa', False) self.verbose = kwargs.pop('verbose', False) self.jks_keystore = kwargs.pop('jks_keystore', False) self.jks_alias = kwargs.pop('jks_alias', False) self.zipalign_path = kwargs.pop('zipalign_path', False) self.config = {} config_data = read_config() default_repo = config_data.get('default_repo', None) if default_repo: if not self.repo: self.repo = default_repo for config in config_data.get('repos', []): config_repo = config.get('repo', None) if config_repo and config_repo == self.repo: self.config = config break else: self.config = config_data if self.config: self.repo = self.repo or self.config.get('repo', None) self.ppa = self.ppa or self.config.get('ppa', None) self.token = self.token or self.config.get('token', None) self.keyid = self.keyid or self.config.get('keyid', None) self.count = self.count or self.config.get('count', None) \ or SEARCH_COUNT self.sign_drafts = self.sign_drafts \ or self.config.get('sign_drafts', False) self.no_ppa = self.no_ppa \ or self.config.get('no_ppa', False) self.verbose = self.verbose or self.config.get('verbose', None) self.jks_keystore = self.jks_keystore \ or self.config.get('jks_keystore', JKS_KEYSTORE) self.jks_alias = self.jks_alias \ or self.config.get('jks_alias', JKS_ALIAS) self.zipalign_path = self.zipalign_path \ or self.config.get('zipalign_path', None) if self.only_ppa and not self.tag_name: print('need --tag-name, when using --only-ppa, exit') sys.exit(1) if self.only_ppa and self.no_ppa: print('someting one required: --no-ppa or --only-ppa, exit') sys.exit(1) if not self.repo: print('no repo found, exit') sys.exit(1) if self.token: os.environ['GITHUB_TOKEN'] = 
self.token if not os.environ.get('GITHUB_TOKEN', None): print('GITHUB_TOKEN environment var not set, exit') sys.exit(1) if self.keyid: self.keyid = self.keyid.split('/')[-1] self.passphrase = None self.gpg = gnupg.GPG() if not self.keyid: print('no keyid set, exit') sys.exit(1) keylist = self.gpg.list_keys(True, keys=[self.keyid]) if not keylist: print('no key with keyid %s found, exit' % self.keyid) sys.exit(1) self.uid = ', '.join(keylist[0].get('uids', ['No uid found'])) if ask_passphrase: while not self.passphrase: self.read_passphrase() elif not self.check_key(): while not self.passphrase: self.read_passphrase() if self.zipalign_path and not self.only_ppa: try: check_call(self.zipalign_path, stderr=FNULL) except CalledProcessError: pass self.read_jks_storepass() self.read_jks_keypass() def read_jks_storepass(self): """Read JKS storepass and keypass""" while not JKS_STOREPASS in os.environ: storepass = getpass.getpass('%sInput %s keystore password:%s ' % (Fore.GREEN, self.jks_keystore, Style.RESET_ALL)) os.environ[JKS_STOREPASS] = storepass try: check_call(KEYTOOL_ARGS + ['-keystore', self.jks_keystore], stdout=FNULL, stderr=FNULL) except CalledProcessError: print('%sWrong keystore password%s' % (Fore.RED, Style.RESET_ALL)) del os.environ[JKS_STOREPASS] def read_jks_keypass(self): while not JKS_KEYPASS in os.environ: keypass = getpass.getpass('%sInput alias password for <%s> ' '[Enter if same as for keystore]:%s ' % (Fore.YELLOW, self.jks_alias, Style.RESET_ALL)) if not keypass: os.environ[JKS_KEYPASS] = os.environ[JKS_STOREPASS] else: os.environ[JKS_KEYPASS] = keypass with ChdirTemporaryDirectory() as tmpdir: test_file = 'testfile.txt' test_zipfile = 'testzip.zip' with open(test_file, 'w') as fdw: fdw.write('testcontent') test_zf = zipfile.ZipFile(test_zipfile, mode='w') test_zf.write(test_file) test_zf.close() sign_args = ['-keystore', self.jks_keystore, test_zipfile, self.jks_alias] try: check_call(JARSIGNER_ARGS + sign_args, stdout=FNULL) except CalledProcessError: print('%sWrong key alias password%s' % (Fore.RED, Style.RESET_ALL)) del os.environ[JKS_KEYPASS] def read_passphrase(self): """Read passphrase for gpg key until check_key is passed""" passphrase = getpass.getpass('%sInput passphrase for Key: %s %s:%s ' % (Fore.GREEN, self.keyid, self.uid, Style.RESET_ALL)) if self.check_key(passphrase): self.passphrase = passphrase def check_key(self, passphrase=None): """Try to sign test string, and if some data signed retun True""" signed_data = self.gpg.sign('test message to check passphrase', keyid=self.keyid, passphrase=passphrase) if signed_data.data and self.gpg.verify(signed_data.data).valid: return True print('%sWrong passphrase!%s' % (Fore.RED, Style.RESET_ALL)) return False def sign_file_name(self, name, detach=True): """Sign file with self.keyid, place signature in deteached .asc file""" with open(name, 'rb') as fdrb: signed_data = self.gpg.sign_file(fdrb, keyid=self.keyid, passphrase=self.passphrase, detach=detach) with open('%s.asc' % name, 'wb') as fdw: fdw.write(signed_data.data) def sign_release(self, release, other_names, asc_names, is_newest_release): """Download/sign unsigned assets, upload .asc counterparts. Create SHA256SUMS.txt with all assets included and upload it with SHA256SUMS.txt.asc counterpart. 
""" repo = self.repo tag = release.get('tag_name', None) if not tag: print('Release have no tag name, skip release\n') return with ChdirTemporaryDirectory() as tmpdir: with open(SHA_FNAME, 'w') as fdw: sdist_match = None for name in other_names: if name == SHA_FNAME: continue if not self.no_ppa: sdist_match = sdist_match \ or SDIST_NAME_PATTERN.match(name) if self.only_ppa and not sdist_match: continue gh_asset_download(repo, tag, name) if self.only_ppa: break apk_match = UNSIGNED_APK_PATTERN.match(name) if apk_match: unsigned_name = name name = self.sign_apk(unsigned_name, apk_match.group(1), apk_match.group(2)) gh_asset_upload(repo, tag, name, dry_run=self.dry_run) gh_asset_delete(repo, tag, unsigned_name, dry_run=self.dry_run) if not '%s.asc' % name in asc_names or self.force: self.sign_file_name(name) if self.force: gh_asset_delete(repo, tag, '%s.asc' % name, dry_run=self.dry_run) gh_asset_upload(repo, tag, '%s.asc' % name, dry_run=self.dry_run) sumline = '%s %s\n' % (sha256_checksum(name), name) fdw.write(sumline) if not self.only_ppa: self.sign_file_name(SHA_FNAME, detach=False) gh_asset_delete(repo, tag, '%s.asc' % SHA_FNAME, dry_run=self.dry_run) gh_asset_upload(repo, tag, '%s.asc' % SHA_FNAME, dry_run=self.dry_run) if sdist_match and is_newest_release: self.make_ppa(sdist_match, tmpdir, tag) def sign_apk(self, unsigned_name, testnet, version): """Sign unsigned release apk""" if not (JKS_STOREPASS in os.environ and JKS_KEYPASS in os.environ): raise Exception('Found unsigned apk and no zipalign path set') testnet = '-Testnet' if testnet else '' name = SIGNED_APK_TEMPLATE.format(testnet=testnet, version=version) print('Signing apk: %s' % name) apk_args = ['-keystore', self.jks_keystore, unsigned_name, self.jks_alias] if self.verbose: check_call(JARSIGNER_ARGS + apk_args) check_call([self.zipalign_path, '-v', '4', unsigned_name, name]) else: check_call(JARSIGNER_ARGS + apk_args, stdout=FNULL) check_call([self.zipalign_path, '-v', '4', unsigned_name, name], stdout=FNULL) return name def make_ppa(self, sdist_match, tmpdir, tag): """Build, sign and upload dsc to launchpad.net ppa from sdist.tar.gz""" repo = self.repo with ChdirTemporaryDirectory() as ppa_tmpdir: sdist_name = sdist_match.group(0) version = sdist_match.group(1) ppa_upstr_version = pep440_to_deb(version) ppa_upstream_suffix = self.ppa_upstream_suffix if ppa_upstream_suffix: ppa_upstr_version += ('+%s' % ppa_upstream_suffix) ppa_orig_name = PPA_ORIG_NAME_TEMPLATE.format( version=ppa_upstr_version) series = list(map(lambda x: x[0], sorted(PPA_SERIES.items(), key=lambda x: x[1]))) sdist_dir = SDIST_DIR_TEMPLATE.format(version=version) sdist_dir = os.path.join(ppa_tmpdir, sdist_dir) debian_dir = os.path.join(sdist_dir, 'debian') changelog_name = os.path.join(debian_dir, 'changelog') relnotes_name = os.path.join(sdist_dir, 'RELEASE-NOTES') print('Found sdist: %s, version: %s' % (sdist_name, version)) print(' Copying sdist to %s, extracting' % ppa_orig_name) shutil.copy(os.path.join(tmpdir, sdist_name), os.path.join(ppa_tmpdir, ppa_orig_name)) check_call(['tar', '-xzvf', ppa_orig_name], stdout=FNULL) with open(relnotes_name, 'r') as rnfd: changes = rnfd.read() changes_match = REL_NOTES_PATTERN.match(changes) if changes_match and len(changes_match.group(1)) > 0: changes = changes_match.group(1).split('\n') for i in range(len(changes)): if changes[i] == '': continue elif changes[i][0] != ' ': changes[i] = ' %s' % changes[i] elif len(changes[i]) > 1 and changes[i][1] != ' ': changes[i] = ' %s' % changes[i] changes = 
'\n'.join(changes) else: changes = '\n * Porting to ppa\n\n' if not self.dry_run and not self.only_ppa: gh_release_edit(repo, tag, name=version) gh_release_edit(repo, tag, body=changes) os.chdir(sdist_dir) print(' Making PPAs for series: %s' % (', '.join(series))) now_formatted = strftime('%a, %d %b %Y %H:%M:%S %z', localtime()) for s in series: ppa_num = get_next_ppa_num(self.ppa, PPA_SOURCE_NAME, ppa_upstr_version, s) rel_version = PPA_SERIES[s] ppa_version = '%s-0ppa%s~ubuntu%s' % (ppa_upstr_version, ppa_num, rel_version) ppa_dsc = os.path.join(ppa_tmpdir, PPA_FILES_TEMPLATE.format( ppa_version, '.dsc')) ppa_chgs = os.path.join(ppa_tmpdir, PPA_FILES_TEMPLATE.format( ppa_version, '_source.changes')) changelog = CHANGELOG_TEMPLATE.format(ppa_version=ppa_version, series=s, changes=changes, uid=self.uid, time=now_formatted) with open(changelog_name, 'w') as chlfd: chlfd.write(changelog) print(' Make %s ppa, Signing with key: %s, %s' % (ppa_version, self.keyid, self.uid)) if self.verbose: check_call(['debuild', '-S']) else: check_call(['debuild', '-S'], stdout=FNULL) print(' Upload %s ppa to %s' % (ppa_version, self.ppa)) if self.dry_run: print(' Dry run: dput ppa:%s %s' % (self.ppa, ppa_chgs)) else: check_call(['dput', ('ppa:%s' % self.ppa), ppa_chgs], stdout=FNULL) print('\n') def search_and_sign_unsinged(self): """Search through last 'count' releases with assets without .asc counterparts or releases withouth SHA256SUMS.txt.asc """ if self.only_ppa: print('Make lanuchpad PPA on repo: %s' % self.repo) else: print('Sign releases on repo: %s' % self.repo) print(' With key: %s, %s\n' % (self.keyid, self.uid)) releases = get_releases(self.repo) if self.tag_name: releases = [r for r in releases if r.get('tag_name', None) == self.tag_name] if len(releases) == 0: print('No release with tag "%s" found, exit' % self.tag_name) sys.exit(1) elif not self.sign_drafts: releases = [r for r in releases if not r.get('draft', False)] # cycle through releases sorted by by publication date releases.sort(key=cmp_to_key(compare_published_times)) for r in releases[:self.count]: tag_name = r.get('tag_name', 'No tag_name') is_draft = r.get('draft', False) is_prerelease = r.get('prerelease', False) created_at = r.get('created_at', '') msg = 'Found %s%s tagged: %s, created at: %s' % ( 'draft ' if is_draft else '', 'prerelease' if is_prerelease else 'release', tag_name, created_at ) if not is_draft: msg += ', published at: %s' % r.get('published_at', '') print(msg) asset_names = [a['name'] for a in r['assets']] if not asset_names: print(' No assets found, skip release\n') continue asc_names = [a for a in asset_names if a.endswith('.asc')] other_names = [a for a in asset_names if not a.endswith('.asc')] need_to_sign = False if asset_names and not asc_names: need_to_sign = True if not need_to_sign: for name in other_names: if not '%s.asc' % name in asc_names: need_to_sign = True break if not need_to_sign: need_to_sign = '%s.asc' % SHA_FNAME not in asc_names if need_to_sign or self.force or self.only_ppa: self.sign_release(r, other_names, asc_names, r==releases[0]) else: print(' Seems already signed, skip release\n') CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.command(context_settings=CONTEXT_SETTINGS) @click.option('-a', '--jks-alias', help='jks key alias') @click.option('-c', '--count', type=int, help='Number of recently published releases to sign') @click.option('-d', '--sign-drafts', is_flag=True, help='Sing draft releases first') @click.option('-f', '--force', is_flag=True, help='Sing already 
signed releases') @click.option('-g', '--tag-name', help='Sign only the release tagged with tag name') @click.option('-k', '--keyid', help='gnupg keyid') @click.option('-K', '--jks-keystore', help='jks keystore path') @click.option('-l', '--ppa', help='PPA in format username/ppa') @click.option('-S', '--ppa-upstream-suffix', help='upload upstream source with version suffix (ex p1)') @click.option('-L', '--no-ppa', is_flag=True, help='Do not make launchpad PPA') @click.option('-n', '--dry-run', is_flag=True, help='Do not upload signed files') @click.option('-p', '--ask-passphrase', is_flag=True, help='Ask to enter passphrase') @click.option('-P', '--only-ppa', is_flag=True, help='Only make launchpad PPA (need --tag-name)') @click.option('-r', '--repo', help='Repository in format username/reponame') @click.option('-s', '--sleep', type=int, help='Sleep number of seconds before signing') @click.option('-t', '--token', help='GitHub access token, to be set as' ' GITHUB_TOKEN environment variable') @click.option('-v', '--verbose', is_flag=True, help='Make more verbose output') @click.option('-z', '--zipalign-path', help='zipalign path') def main(**kwargs): app = SignApp(**kwargs) sleep = kwargs.pop('sleep', None) if (sleep): print('Sleep for %s seconds' % sleep) time.sleep(sleep) app.search_and_sign_unsinged() if __name__ == '__main__': colorama.init() main()
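Editor's note: the short Python sketch below is not part of sign-releases.py; it only illustrates the PEP 440 to Debian version mapping that the pep440_to_deb() helper above performs, inferred from its regex and string concatenation. The expected outputs are assumptions derived from that code, not values verified against a Debian archive.

# Sketch (not original code): the mapping pep440_to_deb() in sign-releases.py is meant to produce.
examples = {
    '3.0.6.1': '3.0.6.1',          # a plain release version passes through unchanged
    '3.0.6.1rc1': '3.0.6.1~rc1',   # a pre-release segment becomes a '~' suffix so it sorts before the final release
    '1!2.0': '1:2.0',              # a PEP 440 epoch becomes a Debian epoch prefix
}
for pep440_version, deb_version in examples.items():
    print('%s -> %s' % (pep440_version, deb_version))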
bubble_sort.py
# -*- coding: utf-8 -*- """ Created on Thu Mar 3 00:20:07 2022 @author: sachi """ # Bubble sort in Python def
(array): for i in range(len(array)): for j in range(0, len(array) - i - 1): if array[j] > array[j + 1]: temp = array[j] array[j] = array[j+1] array[j+1] = temp data = [] n=int(input("Number of elements in the array:")) for i in range(0,n): l=int(input()) data.append(l) bubbleSort(data) print('Sorted Array is:') print(data)
bubbleSort
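Editor's note: the bubble sort above swaps via a temporary variable; the sketch below is an equivalent, slightly more idiomatic Python version (not part of the original file) that uses tuple unpacking and stops early once a full pass makes no swaps.

# Sketch: same algorithm as bubble_sort.py above, with a tuple swap and an early exit.
def bubble_sort(array):
    for i in range(len(array)):
        swapped = False
        for j in range(len(array) - i - 1):
            if array[j] > array[j + 1]:
                array[j], array[j + 1] = array[j + 1], array[j]  # swap without a temp variable
                swapped = True
        if not swapped:  # no swaps in this pass, so the list is already sorted
            break

data = [5, 2, 9, 1]
bubble_sort(data)
print(data)  # prints [1, 2, 5, 9]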