file_name | prefix | suffix | middle
---|---|---|---|
stringlengths 3–137 | stringlengths 0–918k | stringlengths 0–962k | stringlengths 0–812k
message_children.rs
|
// Copyright 2020-2021 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use crate::{
endpoints::{
config::ROUTE_MESSAGE_CHILDREN, filters::with_tangle, path_params::message_id, permission::has_permission,
storage::StorageBackend,
},
types::{body::SuccessBody, responses::MessageChildrenResponse},
};
use bee_message::MessageId;
use bee_runtime::resource::ResourceHandle;
use bee_tangle::MsTangle;
use warp::{Filter, Rejection, Reply};
use std::{iter::FromIterator, net::IpAddr};
fn path() -> impl Filter<Extract = (MessageId,), Error = warp::Rejection> + Clone {
super::path()
.and(warp::path("messages"))
.and(message_id())
.and(warp::path("children"))
.and(warp::path::end())
}
pub(crate) fn filter<B: StorageBackend>(
public_routes: Vec<String>,
allowed_ips: Vec<IpAddr>,
tangle: ResourceHandle<MsTangle<B>>,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
self::path()
.and(warp::get())
.and(has_permission(ROUTE_MESSAGE_CHILDREN, public_routes, allowed_ips))
.and(with_tangle(tangle))
.and_then(message_children)
}
pub async fn message_children<B: StorageBackend>(
message_id: MessageId,
tangle: ResourceHandle<MsTangle<B>>,
) -> Result<impl Reply, Rejection> {
let mut children = Vec::from_iter(tangle.get_children(&message_id).await.unwrap_or_default());
let count = children.len();
let max_results = 1000;
children.truncate(max_results);
Ok(warp::reply::json(&SuccessBody::new(MessageChildrenResponse {
message_id: message_id.to_string(),
max_results,
count,
children_message_ids: children.iter().map(|id| id.to_string()).collect(),
})))
|
}
|
|
stopDaemon.js
|
const {promisify} = require('util');
const {stopDaemon} = require('./');
/** Stop the Lightning daemon
{
lnd: <LND GRPC API Object>
}
|
*/
module.exports = promisify(stopDaemon);
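// Illustrative usage sketch (not part of the original module): `lnd` is assumed to be
// an authenticated LND gRPC API object as described in the doc comment above.
//
//   const stopDaemon = require('./stopDaemon');
//   (async () => {
//     await stopDaemon({lnd}); // resolves once the daemon has been asked to stop
//   })();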
|
|
autogen.go
|
/*
Copyright © 2019 Shi Han NG <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cmd
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/cockroachdb/errors"
"github.com/shihanng/gig/internal/file"
"github.com/spf13/cobra"
"github.com/src-d/enry/v2"
)
func newAutogenCmd(c *command) *cobra.Command {
return &cobra.Command{
Use: "autogen",
Short: "(EXPERIMENTAL) I'm Feeling Lucky",
Long: `(EXPERIMENTAL) Automatically generate .gitignore based on the content of the current directory.
This feature uses github.com/src-d/enry
(which is a port of GitHub's linguist library) to detect
the programming languages in the current working directory
and pass that to the gen command. A known limitation of this
feature is that it cannot detect frameworks.`,
RunE: c.autogenRunE,
}
}
// Heavily borrowed from:
// https://github.com/src-d/enry/blob/697929e1498cbdb7726a4d3bf4c48e706ee8c967/cmd/enry/main.go#L27
func (c *command) autogenRunE(cmd *cobra.Command, args []string) error { // nolint:cyclop
templates, err := file.List(c.templatePath())
if err != nil {
return err
}
supported := make(map[string]bool, len(templates))
for _, t := range templates {
supported[file.Canon(t)] = true
}
var found []string
errWalk := filepath.Walk(".", func(path string, f os.FileInfo, err error) error {
if err != nil {
return filepath.SkipDir
}
if !f.Mode().IsDir() && !f.Mode().IsRegular() {
return nil
}
if enry.IsVendor(path) ||
enry.IsDotFile(path) ||
enry.IsDocumentation(path) ||
enry.IsConfiguration(path) {
if f.IsDir() {
return filepath.SkipDir
}
return nil
}
if f.IsDir() {
return nil
}
//nolint:gomnd
content, err := readFile(path, 16*1024)
if err != nil {
return err
}
language := enry.GetLanguage(path, content)
if language == enry.OtherLanguage {
return nil
}
if supported[file.Canon(language)] {
found = append(found, language)
}
return nil
})
if errWalk != nil {
return errors.Wrap(errWalk, "cmd: walking file")
}
return c.generateIgnoreFile(found)
}
func r
|
path string, limit int64) ([]byte, error) {
if limit <= 0 {
b, err := ioutil.ReadFile(path)
return b, errors.Wrap(err, "cmd: read file")
}
f, err := os.Open(path)
if err != nil {
return nil, errors.Wrap(err, "cmd: open file")
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return nil, errors.Wrap(err, "cmd: get file stat")
}
size := st.Size()
if limit > 0 && size > limit {
size = limit
}
buf := bytes.NewBuffer(nil)
buf.Grow(int(size))
_, err = io.Copy(buf, io.LimitReader(f, limit))
return buf.Bytes(), errors.Wrap(err, "cmd: copy to buffer")
}
|
eadFile(
|
exponential.rs
|
//! This module is used to convert a continuous second order differential equation into a discretized state space model
//!
//! A continuous second order differential equation is given as $$\ddot q + 2\omega\zeta\dot q + \omega^2 q = \vec b\cdot \vec u$$
//! with the output $$\vec y=q\vec c$$
//! The ODE can be written as the state space model:
//! $$
//! \dot x = Ax + Bu
//! $$
//! $$
//! y = Cx
//! $$
//! where
//! ```math
//! x = \begin{bmatrix}
//! q \\
//! \dot q
//! \end{bmatrix},
//! A = \begin{bmatrix}
//! 0 & 1 \\
//! -\omega^2 & -2\omega\zeta
//! \end{bmatrix}
//! ,
//! B = \begin{bmatrix}
//! \vec 0 \\
//! \vec b
//! \end{bmatrix}
//! ,
//! C = \begin{bmatrix}
//! \vec c & \vec 0
//! \end{bmatrix}
//! ```
//! The continuous state space model is transformed into a discrete state space model
//! $$
//! x[k+1] = A_d x\[k\] + B_d u\[k\]
//! $$
//! $$
//! y\[k\] = C_d x\[k\]
//! $$
//! where
//! $$ A_d = \exp(A\tau),$$
//! $$ B_d = A^{-1}(A_d-I)B,$$
//! $$ C_d = C$$
//! and $`\tau`$ is the sample time.
//!
//! [$`A_d = \exp(A\tau)`$](https://www.wolframalpha.com/input/?i=Matrixexp%5B%7B%7B0%2Ct%7D%2C%7B-tx%5E2%2C-2txy%7D%7D%5D)=
//! ```math
//! A_d = \begin{bmatrix}
//! {\alpha_+\beta_- + \alpha_-\beta_+ \over 2z} & {\beta_- - \beta_+ \over 2z} \\
//! {x^2 (\beta_+ - \beta_-) \over 2z} & {\alpha_-\beta_- + \alpha_+\beta_+ \over 2z}
//! \end{bmatrix}
//! ```
//! [$`A^{-1}`$](https://www.wolframalpha.com/input/?i=inverse+%7B%7B0%2C+1%7D%2C+%7B-x%5E2%2C+-2yx%7D%7D)=
//! ```math
//! A^{-1} = \begin{bmatrix}
//! -2yx^{-1} & -x^{-2} \\
//! 1 & 0
//! \end{bmatrix}
//! ```
//! with $`x=\omega`$, $`y=\zeta`$, $`z=x^2\sqrt{y^2-1}`$, $`\alpha_-=z-xy`$, $`\alpha_+=z+xy`$, $`\beta_-=\exp(\tau\alpha_-)`$, $`\beta_+=\exp(-\tau\alpha_+)`$
//!
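//! As a worked sanity check (an illustrative special case, consistent with the
//! `omega == 0` branch of `from_second_order` below): for a rigid-body mode with
//! $`\omega=0`$ the matrix $`A`$ is nilpotent, so $`A^{-1}`$ does not exist and $`B_d`$
//! is instead obtained from the integral form $`B_d=\int_0^\tau \exp(As)\,ds\,B`$, giving
//! ```math
//! A_d = \begin{bmatrix}
//! 1 & \tau \\
//! 0 & 1
//! \end{bmatrix},
//! B_d = \begin{bmatrix}
//! {\tau^2 \over 2} \vec b \\
//! \tau \vec b
//! \end{bmatrix}
//! ```
//!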
// https://en.wikipedia.org/wiki/Discretization
// https://www.wolframalpha.com/input/?i=inverse+%7B%7B0%2C+1%7D%2C+%7B-x%5E2%2C+-2yx%7D%7D
// https://www.wolframalpha.com/input/?i=Matrixexp%5B%7B%7B0%2Ct%7D%2C%7B-tx%5E2%2C-2txy%7D%7D%5D
use nalgebra::Matrix2;
use num_complex::Complex;
use serde::Serialize;
use std::fmt;
/// This structure is used to convert a continuous 2nd order ODE into a discrete state space model
#[derive(Debug, Serialize, Clone, Default, PartialEq)]
pub struct Exponential {
/// Sampling time in seconds
pub tau: f64,
q: (f64, f64, f64, f64),
m: (f64, f64),
b: Vec<f64>,
c: Vec<f64>,
/// State space model output vector
pub y: Vec<f64>,
x: (f64, f64),
}
impl Exponential {
pub fn n_inputs(&self) -> usize {
self.b.len()
}
pub fn n_outputs(&self) -> usize {
self.c.len()
}
}
impl super::Solver for Exponential {
/// Creates a discrete state space model from a 2nd order ODE
///
/// Creates a new structure from the sampling time $`\tau`$, the eigenfrequency $`\omega`$ in radians, the damping coefficient $`\zeta`$, and the vectors $`b`$ and $`c`$ that convert an input vector into a modal coefficient and a modal coefficient into an output vector, respectively
fn from_second_order(
tau: f64,
omega: f64,
zeta: f64,
continuous_bb: Vec<f64>,
continuous_cc: Vec<f64>,
) -> Self
|
/// Returns the state space model output
fn solve(&mut self, u: &[f64]) -> &[f64] {
let (x0, x1) = self.x;
//let s = self.m.0 * x0 + self.m.1 * x1;
self.y.iter_mut().zip(self.c.iter()).for_each(|(y, c)| {
*y = c * x0;
});
let v = self.b.iter().zip(u).fold(0., |s, (b, u)| s + b * u);
self.x.0 = self.q.0 * x0 + self.q.1 * x1 + self.m.0 * v;
self.x.1 = self.q.2 * x0 + self.q.3 * x1 + self.m.1 * v;
self.y.as_slice()
}
}
impl fmt::Display for Exponential {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"2x2 discrete state space model: {}->{} ({:.3}Hz)\n - A: {:.9?}\n - B: {:.9?}",
self.b.len(),
self.c.len(),
self.tau.recip(),
self.q,
self.m
)
}
}
|
{
/*
let aa = Matrix2::<f64>::new(0., 1., -omega * omega, -2. * omega * zeta);
let i = Matrix2::<f64>::identity();
let qp = i + aa * (0.5 * tau);
let iqm = (i - aa * (0.5 * tau)).try_inverse().unwrap();
let q = (qp * iqm).as_slice().to_owned();
let m = (iqm * tau.sqrt()).as_slice().to_owned();
*/
let i = Matrix2::<f64>::identity();
let n = continuous_cc.len();
if omega == 0f64 {
Self {
tau,
q: (1f64, tau, 0f64, 1f64),
m: (0.5 * tau * tau, tau),
b: continuous_bb,
c: continuous_cc,
y: vec![0.; n],
x: (0f64, 0f64),
}
} else {
let x = Complex { re: omega, im: 0. };
let y = Complex { re: zeta, im: 0. };
let ia = Matrix2::new((-2. * y / x).re, -1. / (x * x).re, 1., 0.);
let z = (x * x * (y * y - 1.)).sqrt();
let zmxy = z - x * y;
let zpxy = z + x * y;
let ezmxy = (tau * zmxy).exp();
let ezpxy = (-tau * zpxy).exp();
let ad = Matrix2::new(
((zpxy * ezmxy + zmxy * ezpxy) / (2. * z)).re,
((ezmxy - ezpxy) / (2. * z)).re,
(x * x * (ezpxy - ezmxy) / (2. * z)).re,
((zmxy * ezmxy + zpxy * ezpxy) / (2. * z)).re,
);
let bd_ = ia * (ad - i); // / tau.sqrt();
Self {
tau,
q: (ad[0], ad[2], ad[1], ad[3]),
m: (bd_[2], bd_[3]),
b: continuous_bb,
c: continuous_cc,
y: vec![0.; n],
x: (0f64, 0f64),
}
}
}
|
passage_content.rs
|
use crate::ScriptContent;
use crate::StoryData;
use crate::StoryTitle;
use crate::StylesheetContent;
use crate::TwineContent;
/// An enum of the types of content that can be inside a [`Passage`]
///
/// [`Passage`]: struct.Passage.html
#[derive(Debug)]
pub enum PassageContent {
/// A non-special passage that contains Twine content
Normal(TwineContent),
/// A passage that contains the title of the story
StoryTitle(StoryTitle),
/// A passage that contains the story data defined by the specification
StoryData(Option<StoryData>),
/// A passage that is tagged with `script` and contains a script
Script(ScriptContent),
/// A passage that is tagged with `stylesheet` and contains CSS
Stylesheet(StylesheetContent),
}
impl std::convert::From<TwineContent> for PassageContent {
fn from(p: TwineContent) -> PassageContent {
PassageContent::Normal(p)
}
}
impl std::convert::From<StoryTitle> for PassageContent {
fn from(t: StoryTitle) -> PassageContent {
PassageContent::StoryTitle(t)
}
}
impl std::convert::From<Option<StoryData>> for PassageContent {
fn from(d: Option<StoryData>) -> PassageContent {
PassageContent::StoryData(d)
}
}
impl std::convert::From<StoryData> for PassageContent {
fn from(d: StoryData) -> PassageContent {
PassageContent::StoryData(Some(d))
}
}
impl std::convert::From<ScriptContent> for PassageContent {
fn from(s: ScriptContent) -> PassageContent {
PassageContent::Script(s)
}
}
impl std::convert::From<StylesheetContent> for PassageContent {
fn from(s: StylesheetContent) -> PassageContent
|
}
|
{
PassageContent::Stylesheet(s)
}
|
pc_addurl.py
|
from adguardhome import AdGuardHome
from keys import newpass,newuser
import asyncio
# blocklist name and URL variables
|
async def main():
async with AdGuardHome("172.16.10.199",password=newpass,port=80,username=newuser) as adguard:
bloquear = await adguard.filtering.add_url(nuevaLista, urlLista)
# bloquear = await adguard.filtering.enable_url(url=urlLista)
# refrescar = await adguard.filtering.refresh()
print("Lista PC enable", bloquear)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
nuevaLista = "pc block"
urlLista = "https://raw.githubusercontent.com/manolixgt/agh_blocking_python/main/blocklists/pc_block.txt"
|
model_price.go
|
/*
* Nomics Cryptocurrency & Bitcoin API
*
* # Introduction Welcome to the Nomics Cryptocurrency & Bitcoin API. To sign up for an API key please [go here](https://p.nomics.com/cryptocurrency-bitcoin-api/). [nomics.com](https://nomics.com) is built entirely with the Nomics API. Everything we've done on [nomics.com](https://nomics.com) you can do with our API. There are no internal API endpoints. If you need support, reach out to use at our [forums](https://forums.nomics.com/). # General ## API Server URL The Nomics API runs at `https://api.nomics.com/v1`. All requests should be prefixed by the server URL. ## JSON and CSV Support By default, all endpoints serve data as JSON. However, by passing `format=csv` in the URL, some endpoints will return CSV data. This can be used in Google Sheets via the `IMPORTDATA` function. CSV responses will not contain a header row, this is so that data can be easily concatenated from multiple requests. The fields will be rendered in the same order as the JSON fields. See the endpoint's documentation for an example. Not all endpoints support CSV. Endpoints that support CSV will have the `format` parameter in the parameters section. ## Errors The Nomics API uses standard HTTP status codes to indicate success or failure. 200 represents success, 4xx represents a user error (such as a problem with your key), and 5xx represents a problem with our API. ## Versioning We follow Semantic Versioning. That means our API is versioned as Major.Minor.Patch. For example, Version 1.2.3 has major version 1, minor version 2, and patch version 3. Major version changes indicate that we have altered the API significantly and it is no longer compatible with a previous version. Major versions are also used as the API URL prefix. When we update the major version, we will not remove the previous version without notice to API customers and a deprecation period to allow everyone to smoothly update to the new version. Minor version changes indicate that we have added new functionality without breaking any existing functionality. An API client is compatible with future minor versions. Note that a minor version update may add a new field to an existing API endpoint's response. Your API client must ignore fields it does not understand in order to be compatible with future minor versions. Patch version changes indicate we fixed a bug or security vulnerability. Patch versions don't add new functionality. ## Cross Origin Resource Sharing (CORS) This API supports Cross Origin Resource Sharing, which allows you to make API requests directly from your user's browser. To use CORS, you must provide Nomics with the domains on which your application will run so that we can whitelist them for CORS access. Requests from `localhost`, `127.0.0.1`, and `0.0.0.0` will always succeed to aid in development. ## Demo Application A demo application using the Nomics API, CORS, and React is available on Glitch.com. This can help you get started using the Nomics API. Keep in mind it uses the demo key, which is rotated frequently. You should get your own API key before deploying an app to production. Check it out: <div class=\"glitch-embed-wrap\" style=\"height: 420px; width: 100%;\"> <iframe src=\"https://glitch.com/embed/#!/embed/nomics-api-demo?path=README.md\" alt=\"nomics-api-demo on glitch\" style=\"height: 100%; width: 100%; border: 0;\"></iframe> </div> ## Demo Spreadsheet Here is a demo of using the Nomics API with Google Sheets. 
<iframe width=\"100%\" height=\"400px\" src=\"https://docs.google.com/spreadsheets/d/e/2PACX-1vShn2iWjvqQ0ueBa9l9g1UBYVM92OZSgZ4nmp0rWuykvHPrvyMyMeSN4r0Orj0ACEIIKdCz6cc5abCw/pubhtml?widget=true&headers=false\"></iframe> ### Formulas * A2: `=IMPORTDATA(\"https://api.nomics.com/v1/prices?key=your-key-here&format=csv\")` * Column F: `=LOOKUP(D2,A:A,B:B)` finds D2 (BTC) in column A and pulls the price from column B * Column G: `=E2*F2` * Column H: `=G2/I$2` * Column I: `=SUM(G:G)` # SDKs and Libraries ## By Nomics - [Nomics JavaScript Client](https://github.com/nomics-crypto/nomics-javascript) ## Community Submissions - [Nomics.com Swift SDK](https://forums.nomics.com/t/swift-sdk-supporting-ios-macos-tvos-and-watchos/) by Nick DiZazzo - [Nomics Node.js Library](https://forums.nomics.com/t/i-made-a-library-for-node-js/) by mikunimaru - [Nomics Python Wrapper](https://forums.nomics.com/t/python-package-for-nomics-api/119) by Taylor Facen - [Python Wrapper for Nomics](https://github.com/AviFelman/py-nomics) by Avi Felman We love watching developers explore new use-cases with our API. Whether you're tinkering on a small side project or building an open-source resource, please share what you're up to in our [forums](https://forums.nomics.com/). # Authentication <!-- ReDoc-Inject: <security-definitions> -->
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package nomics
import (
"encoding/json"
)
// Price struct for Price
type Price struct {
// Currency ID
Currency *string `json:"currency,omitempty"`
// Price
Price *string `json:"price,omitempty"`
}
// NewPrice instantiates a new Price object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewPrice() *Price {
this := Price{}
return &this
}
// NewPriceWithDefaults instantiates a new Price object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewPriceWithDefaults() *Price {
this := Price{}
return &this
}
// GetCurrency returns the Currency field value if set, zero value otherwise.
func (o *Price) GetCurrency() string {
if o == nil || o.Currency == nil {
var ret string
return ret
}
return *o.Currency
}
// GetCurrencyOk returns a tuple with the Currency field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Price) GetCurrencyOk() (*string, bool) {
if o == nil || o.Currency == nil {
return nil, false
}
return o.Currency, true
}
// HasCurrency returns a boolean if a field has been set.
func (o *Price) HasCurrency() bool {
if o != nil && o.Currency != nil {
return true
}
return false
}
// SetCurrency gets a reference to the given string and assigns it to the Currency field.
func (o *Price) SetCurrency(v string) {
o.Currency = &v
}
// GetPrice returns the Price field value if set, zero value otherwise.
func (o *Price) GetPrice() string {
if o == nil || o.Price == nil {
var ret string
return ret
}
return *o.Price
}
// GetPriceOk returns a tuple with the Price field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Price) GetPriceOk() (*string, bool) {
if o == nil || o.Price == nil {
return nil, false
}
return o.Price, true
}
// HasPrice returns a boolean if a field has been set.
func (o *Price) HasPrice() bool {
if o != nil && o.Price != nil {
return true
}
return false
}
// SetPrice gets a reference to the given string and assigns it to the Price field.
func (o *Price) SetPrice(v string) {
o.Price = &v
}
func (o Price) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.Currency != nil {
toSerialize["currency"] = o.Currency
}
if o.Price != nil {
toSerialize["price"] = o.Price
}
return json.Marshal(toSerialize)
}
type NullablePrice struct {
value *Price
isSet bool
}
func (v NullablePrice) Get() *Price {
return v.value
}
func (v *NullablePrice) Set(val *Price) {
v.value = val
v.isSet = true
}
|
return v.isSet
}
func (v *NullablePrice) Unset() {
v.value = nil
v.isSet = false
}
func NewNullablePrice(val *Price) *NullablePrice {
return &NullablePrice{value: val, isSet: true}
}
func (v NullablePrice) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullablePrice) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
|
func (v NullablePrice) IsSet() bool {
|
logging_v2_generated_config_service_v2_update_settings_sync.py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateSettings
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_v2_generated_ConfigServiceV2_UpdateSettings_sync]
from google.cloud import logging_v2
def
|
():
# Create a client
client = logging_v2.ConfigServiceV2Client()
# Initialize request argument(s)
request = logging_v2.UpdateSettingsRequest(
name="name_value",
)
# Make the request
response = client.update_settings(request=request)
# Handle the response
print(response)
# [END logging_v2_generated_ConfigServiceV2_UpdateSettings_sync]
|
sample_update_settings
|
Loading.tsx
|
import React, {
useState,
|
createContext,
useCallback,
useEffect,
useMemo,
useContext,
} from "react";
import { getTotalIncome } from "../services/income";
import { getTotalOutcome } from "../services/outcome";
export const TransactionsContext = createContext({} as BalanceProviderData);
export interface BalanceProviderData {
balance: number;
reload: () => void;
}
export const BalanceProvider: React.FC = ({ children }) => {
const [totalIncome, setTotalIncome] = useState(0);
const [totalOutcome, setTotalOutcome] = useState(0);
const reload = useCallback(() => {
getTotalIncome().then(setTotalIncome);
getTotalOutcome().then(setTotalOutcome);
}, []);
useEffect(() => {
reload();
}, [reload]);
const balance = useMemo(
() => totalIncome - totalOutcome,
[totalIncome, totalOutcome]
);
return (
<TransactionsContext.Provider value={{ balance, reload }}>
{children}
</TransactionsContext.Provider>
);
};
export function useBalance(): BalanceProviderData {
return useContext(TransactionsContext);
}
| |
index.tsx
|
import { LeafletMouseEvent } from "leaflet";
import React, { ChangeEvent, FormEvent, useEffect, useState } from "react";
import { useHistory } from "react-router-dom";
import Dropzone from "../../components/Dropzone";
import api from "../../services/api";
import Header from "../Header";
import CityAndState from "./CityAndState";
import CollectItems from "./CollectItems";
import CollectPointMap from "./CollectPointMap";
import Field from "./Field";
import Legend from "./Legend";
import "./style.css";
const CreatePoint = () => {
const history = useHistory();
//#region Inputs, Collect items, submit
const generalData = {
name: "",
email: "",
whatsapp: "",
};
const [formData, setFormData] = useState(generalData);
const [selectedItems, setSelectedItems] = useState<number[]>([]);
const [selectedFile, setSelectedFile] = useState<File>();
function handleInputChange(event: ChangeEvent<HTMLInputElement>) {
console.log(event.target.value);
const { name, value } = event.target;
setFormData({ ...formData, [name]: value });
}
function handleSelectItem(id: number) {
const alreadySelected = selectedItems.findIndex((item) => item === id);
if (alreadySelected >= 0) {
const filteredItems = selectedItems.filter((item) => item !== id);
setSelectedItems(filteredItems);
} else {
setSelectedItems([...selectedItems, id]);
}
}
async function handleSubmit(event: FormEvent) {
event.preventDefault();
const { name, email, whatsapp } = formData;
const uf = selectedState;
const city = selectedCity;
const [lat, long] = selectedPosition;
const items = selectedItems;
const data = new FormData();
data.append("name", name);
data.append("email", email);
data.append("whatsapp", whatsapp);
data.append("uf", uf);
data.append("city", city);
data.append("lat", String(lat));
data.append("long", String(long));
data.append("items", items.join(","));
if (selectedFile) {
data.append("image", selectedFile);
}
await api.post("points", data);
alert("Ponto de coleta criado!");
history.push("/");
}
//#endregion
//#region City and State
const initialState = "";
const initialCity = "";
const [selectedState, setSelectedState] = useState(initialState);
const [selectedCity, setSelectedCity] = useState(initialCity);
function handleSelectUf(event: ChangeEvent<HTMLSelectElement>) {
const uf = event.target.value;
setSelectedState(uf);
}
function handleSelectCity(event: ChangeEvent<HTMLSelectElement>) {
const city = event.target.value;
setSelectedCity(city);
}
//#endregion
//#region Map
const [initialPosition, setInitialPosition] = useState<[number, number]>([
0,
0,
]);
const [selectedPosition, setSelectedPosition] = useState<[number, number]>([
0,
0,
]);
useEffect(() => {
navigator.geolocation.getCurrentPosition((position) => {
const { latitude, longitude } = position.coords;
setInitialPosition([latitude, longitude]);
});
}, []);
function handleMapClick(event: LeafletMouseEvent) {
setSelectedPosition([event.latlng.lat, event.latlng.lng]);
}
//#endregion
return (
<div id="page-create-point">
<div className="content">
<Header />
</div>
<form action="http://localhost:3333/points" onSubmit={handleSubmit}>
<h1>
Cadastro do <br />
campo de coleta
</h1>
<Dropzone onFileUploaded={setSelectedFile} />
<fieldset>
<Legend title="Dados" />
<Field
name="name"
onChange={handleInputChange}
title="Nome da Entidade"
type="text"
/>
<div className="field-group">
<Field
name="email"
onChange={handleInputChange}
title="E-mail"
type="email"
/>
<Field
|
type="text"
/>
</div>
</fieldset>
<fieldset>
<Legend title="Endereço" details="Selecione o endereço no mapa" />
<CollectPointMap
initialPosition={initialPosition}
selectedPosition={selectedPosition}
handleMapClick={handleMapClick}
/>
<CityAndState
handleSelectCity={handleSelectCity}
handleSelectUf={handleSelectUf}
selectedState={selectedState}
/>
</fieldset>
<fieldset>
<Legend title="Itens de coleta" details="Escolha um ou mais itens" />
<CollectItems
selectedItems={selectedItems}
onClick={handleSelectItem}
/>
</fieldset>
<button type="submit">Cadastrar ponto de coleta</button>
</form>
</div>
);
};
export default CreatePoint;
|
name="whatsapp"
onChange={handleInputChange}
title="Whatsapp"
|
simpleWebBrowser.py
|
# Simple web browser using sockets
import socket
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(('data.pr4e.org', 80))
cmd = 'GET http://data.pr4e.org/romeo.txt HTTP/1.0\r\n\r\n'.encode()
mysock.send(cmd)
|
if len(data) < 1 :
break
print(data.decode())
mysock.close()
|
while True :
data = mysock.recv(512)
|
model_sign_request_body.go
|
package model
import (
"github.com/huaweicloud/huaweicloud-sdk-go-v3/core/utils"
"errors"
"github.com/huaweicloud/huaweicloud-sdk-go-v3/core/converter"
"strings"
)
type SignRequestBody struct {
// Key ID, 36 bytes, matching the regex "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$". For example: 0d0466b0-e727-4d9c-b35d-f84bb474a37f.
KeyId string `json:"key_id"`
// The message digest or message to sign; the message must be shorter than 4096 bytes and Base64 encoded.
Message string `json:"message"`
// Signing algorithm, one of: - RSASSA_PSS_SHA_256 - RSASSA_PSS_SHA_384 - RSASSA_PSS_SHA_512 - RSASSA_PKCS1_V1_5_SHA_256 - RSASSA_PKCS1_V1_5_SHA_384 - RSASSA_PKCS1_V1_5_SHA_512 - ECDSA_SHA_256 - ECDSA_SHA_384 - ECDSA_SHA_512 - SM2DSA_SM3
SigningAlgorithm SignRequestBodySigningAlgorithm `json:"signing_algorithm"`
// Message type, defaults to "DIGEST". One of: - DIGEST: a message digest - RAW: the raw message
MessageType *SignRequestBodyMessageType `json:"message_type,omitempty"`
// Request sequence number, a 36-byte serial number. For example: 919c82d4-8046-4722-9094-35c3c6524cff.
Sequence *string `json:"sequence,omitempty"`
}
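// Illustrative example of a marshalled SignRequestBody (placeholder values taken from
// the field documentation above; the message value is a stand-in, not real data):
//
//	{
//	  "key_id": "0d0466b0-e727-4d9c-b35d-f84bb474a37f",
//	  "message": "<Base64-encoded digest or message, shorter than 4096 bytes>",
//	  "signing_algorithm": "RSASSA_PSS_SHA_256",
//	  "message_type": "DIGEST",
//	  "sequence": "919c82d4-8046-4722-9094-35c3c6524cff"
//	}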
func (o SignRequestBody) String() string {
data, err := utils.Marshal(o)
if err != nil {
return "SignRequestBody struct{}"
}
return strings.Join([]string{"SignRequestBody", string(data)}, " ")
}
type SignRequestBodySigningAlgorithm struct {
value string
}
type SignRequestBodySigningAlgorithmEnum struct {
RSASSA_PSS_SHA_256 SignRequestBodySigningAlgorithm
RSASSA_PSS_SHA_384 SignRequestBodySigningAlgorithm
RSASSA_PSS_SHA_512 SignRequestBodySigningAlgorithm
RSASSA_PKCS1_V1_5_SHA_256 SignRequestBodySigningAlgorithm
RSASSA_PKCS1_V1_5_SHA_384 SignRequestBodySigningAlgorithm
RSASSA_PKCS1_V1_5_SHA_512 SignRequestBodySigningAlgorithm
ECDSA_SHA_256 SignRequestBodySigningAlgorithm
ECDSA_SHA_384 SignRequestBodySigningAlgorithm
ECDSA_SHA_512 SignRequestBodySigningAlgorithm
SM2_DSA_SM3 SignRequestBodySigningAlgorithm
}
func GetSignRequestBodySigningAlgorithmEnum() SignRequestBodySigningAlgorithmEnum {
return SignRequestBodySigningAlgorithmEnum{
RSASSA_PSS_SHA_256: SignRequestBodySigningAlgorithm{
value: "RSASSA_PSS_SHA_256",
},
RSASSA_PSS_SHA_384: SignRequestBodySigningAlgorithm{
value: "RSASSA_PSS_SHA_384",
},
RSASSA_PSS_SHA_512: SignRequestBodySigningAlgorithm{
value: "RSASSA_PSS_SHA_512",
},
RSASSA_PKCS1_V1_5_SHA_256: SignRequestBodySigningAlgorithm{
value: "RSASSA_PKCS1_V1_5_SHA_256",
},
RSASSA_PKCS1_V1_5_SHA_384: SignRequestBodySigningAlgorithm{
value: "RSASSA_PKCS1_V1_5_SHA_384",
},
RSASSA_PKCS1_V1_5_SHA_512: SignRequestBodySigningAlgorithm{
value: "RSASSA_PKCS1_V1_5_SHA_512",
},
ECDSA_SHA_256: SignRequestBodySigningAlgorithm{
value: "ECDSA_SHA_256",
},
ECDSA_SHA_384: SignRequestBodySigningAlgorithm{
value: "ECDSA_SHA_384",
},
ECDSA_SHA_512: SignRequestBodySigningAlgorithm{
value: "ECDSA_SHA_512",
},
SM2_DSA_SM3: SignRequestBodySigningAlgorithm{
value: "SM2DSA_SM3",
},
}
}
func (c SignRequestBodySigningAlgorithm) MarshalJSON() ([]byte, error) {
return utils.Marshal(c.value)
}
func (c *SignRequestBodySigningAlgorithm) UnmarshalJSON(b []byte) error {
myConverter := converter.StringConverterFactory("string")
if myConverter != nil {
val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\""))
if err == nil {
c.value = val.(string)
return nil
}
return err
} else {
return errors.New("convert enum data to string error")
}
}
type SignRequestBodyMessageType struct {
value string
}
type SignRequestBodyMessageTypeEnum struct {
DIGEST SignRequestBodyMessageType
RAW SignRequestBodyMessageType
}
func GetSignRequestBodyMessageTypeEnum() SignRequestBodyMessageTypeEnum {
return SignRequestBodyMessageTypeEnum{
DIGEST: SignRequestBodyMessageType{
value: "DIGEST",
},
RAW: SignRequestBodyMessageType{
value: "RAW",
},
}
}
func (c SignRequestBodyMessageType) Mars
|
verterFactory("string")
if myConverter != nil {
val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\""))
if err == nil {
c.value = val.(string)
return nil
}
return err
} else {
return errors.New("convert enum data to string error")
}
}
|
halJSON() ([]byte, error) {
return utils.Marshal(c.value)
}
func (c *SignRequestBodyMessageType) UnmarshalJSON(b []byte) error {
myConverter := converter.StringCon
|
device_test.py
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import networkx as nx
import cirq
import cirq.contrib.routing as ccr
def test_xmon_device_to_graph():
with cirq.testing.assert_deprecated("gridqubits_to_graph_device", deadline="v0.12"):
class TestDevice:
qubits = cirq.GridQubit.rect(2, 11)
foxtail_graph = ccr.xmon_device_to_graph(TestDevice())
two_by_eleven_grid_graph = ccr.get_grid_device_graph(2, 11)
assert foxtail_graph.nodes == two_by_eleven_grid_graph.nodes
assert foxtail_graph.edges() == two_by_eleven_grid_graph.edges()
@pytest.mark.parametrize('n_qubits', (2, 5, 11))
def
|
(n_qubits):
graph = ccr.get_linear_device_graph(n_qubits)
assert sorted(graph) == cirq.LineQubit.range(n_qubits)
assert len(graph.edges()) == n_qubits - 1
assert all(abs(a.x - b.x) == 1 for a, b in graph.edges())
def test_nx_qubit_layout():
grid_qubit_graph = ccr.gridqubits_to_graph_device(cirq.GridQubit.rect(5, 5))
pos = ccr.nx_qubit_layout(grid_qubit_graph)
assert len(pos) == len(grid_qubit_graph)
for k, (x, y) in pos.items():
assert x == k.col
assert y == -k.row
def test_nx_qubit_layout_2():
g = nx.from_edgelist(
[
(cirq.LineQubit(0), cirq.LineQubit(1)),
(cirq.LineQubit(1), cirq.LineQubit(2)),
]
)
pos = ccr.nx_qubit_layout(g)
for k, (x, y) in pos.items():
assert x == k.x
assert y == 0.5
def test_nx_qubit_layout_3():
g = nx.from_edgelist(
[
(cirq.NamedQubit('a'), cirq.NamedQubit('b')),
(cirq.NamedQubit('b'), cirq.NamedQubit('c')),
]
)
node_to_i = {
cirq.NamedQubit('a'): 0,
cirq.NamedQubit('b'): 1,
cirq.NamedQubit('c'): 2,
}
pos = ccr.nx_qubit_layout(g)
for k, (x, y) in pos.items():
assert x == 0.5
assert y == node_to_i[k] + 1
|
test_get_linear_device_graph
|
test_differential_rotation.py
|
import os
import pytest
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates import Longitude
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import TimeDelta
from sunpy.coordinates import frames
from sunpy.coordinates.ephemeris import get_earth
from sunpy.map.maputils import map_edges
from sunpy.physics.differential_rotation import (diff_rot, solar_rotate_coordinate,
differential_rotate,
_get_new_observer, _rotate_submap_edge,
_get_extreme_position, _get_bounding_coordinates,
_warp_sun_coordinates)
import sunpy.data.test
import sunpy.map
# pylint: disable=C0103,R0904,W0201,W0212,W0232,E1103
# Please note the numbers in these tests are not checked for physical
# accuracy, only that they are the values the function was outputting upon
# implementation. This is not a significant issue for the diff_rot function
# since it is relatively simple and the values it produces can be easily
# compared to other implementations of the same simple function. The same
# cannot be said for the solar_rotate_coordinate function. This functionality
# relies in particular on accurate knowledge of the solar ephemeris.
# There is no reference implementation of the solar_rotate_coordinate function
# of demonstrated trustworthiness at time of writing in any language. There
# are no known independent values or tests that can be used to test the
# veracity of the solar_rotate_coordinate function. This being the case, the
# solar_rotate_coordinate function is tested against values that it generated.
# Therefore these tests test for consistency, not accuracy. Note that when the
# 0.8.0 branch was released, the solar ephemeris calculation was handed off to
# the relevant Astropy code. The solar_rotate_coordinate tests were changed
# for self-consistency. Note that the change in position comparing the results
# of pre- and 0.8.0 sunpy solar coordinate rotation functionality (rot_hpc
# and solar_rotate_coordinate respectively) was on the order of 0.5 arcseconds.
# At time of writing, the difference between the rotation
# calculated using the pre-0.8.0 rot_hpc function and the SSWIDL equivalent
# rot_xy.pro for the tests given in pre-0.8.0 were on the order of hundredths
# of an arcsecond. I suspect that the reason for the small differences is
# because the sunpy's ephemeris and coordinate transformation infrastructure
# was largely based on that in SSWIDL.
testpath = sunpy.data.test.rootdir
@pytest.fixture
def aia171_test_map():
return sunpy.map.Map(os.path.join(testpath, 'aia_171_level1.fits'))
@pytest.fixture
def all_off_disk_map(aia171_test_map):
return aia171_test_map.submap((1, 1)*u.pix, (11, 12)*u.pix)
@pytest.fixture
def all_on_disk_map(aia171_test_map):
return aia171_test_map.submap((30, 60)*u.pix, (50, 85)*u.pix)
@pytest.fixture
def straddles_limb_map(aia171_test_map):
return aia171_test_map.submap((64, 80)*u.pix, (120, 127)*u.pix)
@pytest.fixture
def aia171_test_map_with_mask(aia171_test_map):
shape = aia171_test_map.data.shape
mask = np.zeros_like(aia171_test_map.data, dtype=bool)
mask[0:shape[0]//2, 0:shape[1]//2] = True
return sunpy.map.Map(np.ma.array(aia171_test_map.data, mask=mask), aia171_test_map.meta)
@pytest.fixture
def aia171_test_submap(aia171_test_map):
bl = SkyCoord(-512 * u.arcsec, 100 * u.arcsec, frame=aia171_test_map.coordinate_frame)
ur = SkyCoord(-100 * u.arcsec, 400 * u.arcsec, frame=aia171_test_map.coordinate_frame)
return aia171_test_map.submap(bl, ur)
@pytest.fixture
def seconds_per_day():
return 24 * 60 * 60.0 * u.s
def test_single(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg)
assert_quantity_allclose(rot, 136.8216 * u.deg, rtol=1e-3)
def test_array(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, np.linspace(-70, 70, 2) * u.deg)
assert_quantity_allclose(rot, Longitude(np.array([110.2725, 110.2725]) * u.deg), rtol=1e-3)
def test_synodic(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='howard', frame_time='synodic')
assert_quantity_allclose(rot, 126.9656 * u.deg, rtol=1e-3)
def test_sidereal(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='howard', frame_time='sidereal')
assert_quantity_allclose(rot, 136.8216 * u.deg, rtol=1e-3)
def test_howard(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='howard')
assert_quantity_allclose(rot, 136.8216 * u.deg, rtol=1e-3)
def test_allen(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='allen')
assert_quantity_allclose(rot, 136.9 * u.deg, rtol=1e-3)
def test_snodgrass(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='snodgrass')
assert_quantity_allclose(rot, 135.4232 * u.deg, rtol=1e-3)
def test_fail(seconds_per_day):
with pytest.raises(ValueError):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='garbage')
def test_solar_rotate_coordinate():
# Testing along the Sun-Earth line, observer is on the Earth
obs_time = '2010-09-10 12:34:56'
observer = get_earth(obs_time)
c = SkyCoord(-570*u.arcsec, 120*u.arcsec, obstime=obs_time, observer=observer, frame=frames.Helioprojective)
new_time = '2010-09-11 12:34:56'
new_observer = get_earth(new_time)
# Test that when both the observer and the time are specified, an error is raised.
with pytest.raises(ValueError):
d = solar_rotate_coordinate(c, observer=observer, time=new_time)
# Test that the code properly filters the observer keyword
with pytest.raises(ValueError):
d = solar_rotate_coordinate(c, observer='earth')
# Test that the code properly filters the time keyword
with pytest.raises(ValueError):
with pytest.warns(UserWarning, match="Using 'time' assumes an Earth-based observer"):
d = solar_rotate_coordinate(c, time='noon')
# Test that the code gives the same output for multiple different inputs
# that define the same observer location and time.
for i, definition in enumerate((1 * u.day, TimeDelta(1*u.day), new_time, new_observer)):
if i in (0, 1, 2):
with pytest.warns(UserWarning, match="Using 'time' assumes an Earth-based observer"):
d = solar_rotate_coordinate(c, time=definition)
else:
d = solar_rotate_coordinate(c, observer=definition)
# Test that a SkyCoordinate is created
assert isinstance(d, SkyCoord)
# Test the coordinate
np.testing.assert_almost_equal(d.Tx.to(u.arcsec).value, -371.8885208634674, decimal=1)
np.testing.assert_almost_equal(d.Ty.to(u.arcsec).value, 105.35006656251727, decimal=1)
np.testing.assert_allclose(d.distance.to(u.km).value, 1.499642e+08, rtol=1e-5)
# Test that the SkyCoordinate is Helioprojective
assert isinstance(d.frame, frames.Helioprojective)
def test_differential_rotate(aia171_test_map, all_off_disk_map, all_on_disk_map, straddles_limb_map):
# Test a map that is entirely off the disk of the Sun
# Should report an error
with pytest.raises(ValueError):
dmap = differential_rotate(all_off_disk_map)
# Test a full disk map
new_observer = get_earth(aia171_test_map.date + 6*u.hr)
dmap = differential_rotate(aia171_test_map, observer=new_observer)
assert dmap.data.shape == aia171_test_map.data.shape
# Test a map that is entirely on disk - triggers sub full disk branches
# Rotated map should have a smaller extent in the x - direction
new_observer = get_earth(all_on_disk_map.date - 48*u.hr)
dmap = differential_rotate(all_on_disk_map, observer=new_observer)
assert dmap.data.shape[1] < all_on_disk_map.data.shape[1]
# This rotated map should have a larger extent in the x direction
new_observer = get_earth(all_on_disk_map.date + 48*u.hr)
dmap = differential_rotate(all_on_disk_map, observer=new_observer)
assert dmap.data.shape[1] > all_on_disk_map.data.shape[1]
# Test a map that straddles the limb - triggers sub full disk branches
# Rotated map should have a smaller extent in the x - direction
new_observer = get_earth(straddles_limb_map.date + 48*u.hr)
dmap = differential_rotate(straddles_limb_map, observer=new_observer)
assert dmap.data.shape[1] < straddles_limb_map.data.shape[1]
# The output map should have the positional properties of the observer
assert dmap.date == new_observer.obstime
assert dmap.heliographic_latitude == new_observer.lat
assert dmap.heliographic_longitude == new_observer.lon
# Tests of the helper functions
def test_get_new_observer(aia171_test_map):
initial_obstime = aia171_test_map.date
rotation_interval = 2 * u.day
new_time = initial_obstime + rotation_interval
time_delta = new_time - initial_obstime
observer = get_earth(initial_obstime + rotation_interval)
# The observer time is set along with other definitions of time
for time in (rotation_interval, new_time, time_delta):
with pytest.raises(ValueError):
new_observer = _get_new_observer(initial_obstime, observer, time)
# Obstime property is present but the value is None
observer_obstime_is_none = SkyCoord(12*u.deg, 46*u.deg, frame=frames.HeliographicStonyhurst)
with pytest.raises(ValueError):
new_observer = _get_new_observer(None, observer_obstime_is_none, None)
# When the observer is set, it gets passed back out
new_observer = _get_new_observer(initial_obstime, observer, None)
assert isinstance(new_observer, SkyCoord)
np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).lon.to(u.deg).value,
observer.transform_to(frames.HeliographicStonyhurst).lon.to(u.deg).value, decimal=3)
np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).lat.to(u.deg).value,
observer.transform_to(frames.HeliographicStonyhurst).lat.to(u.deg).value, decimal=3)
np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).radius.to(u.au).value,
observer.transform_to(frames.HeliographicStonyhurst).radius.to(u.au).value, decimal=3)
# When the time is set, a coordinate for Earth comes back out
for time in (rotation_interval, new_time, time_delta):
with pytest.warns(UserWarning, match="Using 'time' assumes an Earth-based observer"):
new_observer = _get_new_observer(initial_obstime, None, time)
assert isinstance(new_observer, SkyCoord)
np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).lon.to(u.deg).value,
observer.transform_to(frames.HeliographicStonyhurst).lon.to(u.deg).value, decimal=3)
np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).lat.to(u.deg).value,
observer.transform_to(frames.HeliographicStonyhurst).lat.to(u.deg).value, decimal=3)
np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).radius.to(u.au).value,
observer.transform_to(frames.HeliographicStonyhurst).radius.to(u.au).value, decimal=3)
|
with pytest.raises(ValueError):
new_observer = _get_new_observer(initial_obstime, None, None)
def test_rotate_submap_edge(aia171_test_map, all_off_disk_map, all_on_disk_map, straddles_limb_map):
observer = get_earth(aia171_test_map.date + 2*u.day)
# For a map that has all the edges off disk, the function should
# return just the edges of the map - no solar rotation applied.
for this_map in (aia171_test_map, all_off_disk_map):
edges = map_edges(this_map)
for this_edge in range(0, 4):
pixels = edges[this_edge]
res = _rotate_submap_edge(this_map, pixels, observer)
assert all(res.Tx == (this_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Tx)
assert all(res.Ty == (this_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Ty)
# For an on disk map, all the edges should change
edges = map_edges(all_on_disk_map)
for this_edge in range(0, 4):
pixels = edges[this_edge]
res = _rotate_submap_edge(all_on_disk_map, pixels, observer)
assert all(res.Tx != (all_on_disk_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Tx)
assert all(res.Ty != (all_on_disk_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Ty)
# For the limb map, two of the edges move and two do not
edges = map_edges(straddles_limb_map)
for this_edge in (0, 3): # Top and right edges do not move
pixels = edges[this_edge]
res = _rotate_submap_edge(straddles_limb_map, pixels, observer)
assert all(res.Tx == (straddles_limb_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Tx)
assert all(res.Ty == (straddles_limb_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Ty)
for this_edge in (1, 2): # Bottom and left edges do move
pixels = edges[this_edge]
res = _rotate_submap_edge(straddles_limb_map, pixels, observer)
assert all(res.Tx != (straddles_limb_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Tx)
assert all(res.Ty != (straddles_limb_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Ty)
def test_get_extreme_position():
coords = SkyCoord([-1, 0, 1, np.nan]*u.arcsec, [-2, 0, 2, -np.nan]*u.arcsec, frame=frames.Helioprojective)
with pytest.warns(RuntimeWarning, match='All-NaN axis encountered'):
assert _get_extreme_position(coords, 'Tx', operator=np.nanmin) == -1
assert _get_extreme_position(coords, 'Ty', operator=np.nanmin) == -2
assert _get_extreme_position(coords, 'Tx', operator=np.nanmax) == 1
assert _get_extreme_position(coords, 'Ty', operator=np.nanmax) == 2
with pytest.raises(ValueError):
_get_extreme_position(coords, 'lon', operator=np.nanmax)
def test_get_bounding_coordinates():
coords = SkyCoord([-1, 0, 1] * u.arcsec, [-2, 0, 2] * u.arcsec, frame=frames.Helioprojective,
observer=get_earth("1999-09-13 00:00:00"))
bl, tr = _get_bounding_coordinates(coords)
assert bl.Tx == -1*u.arcsec
assert bl.Ty == -2*u.arcsec
assert bl.observer == coords[0].observer
assert tr.Tx == 1*u.arcsec
assert tr.Ty == 2*u.arcsec
assert tr.observer == coords[0].observer
def test_warp_sun_coordinates(all_on_disk_map):
# Define an observer
new_observer = get_earth(all_on_disk_map.date + 6*u.hr)
dummy_array = np.zeros((500, 2))
# Call the warp
xy2 = _warp_sun_coordinates(dummy_array, all_on_disk_map, new_observer)
# Test the properties of the output
assert xy2.shape == dummy_array.shape
assert isinstance(xy2, np.ndarray)
# Test the values - values are not independently found
# We are passing in 500 pairs of (0,0) so all the output pixels should be the same
np.testing.assert_almost_equal(xy2[:, 0], -2.08384686, decimal=2)
np.testing.assert_almost_equal(xy2[:, 1], -0.23927568, decimal=2)
@pytest.mark.array_compare
def test_differential_rotation(aia171_test_map):
with pytest.warns(UserWarning, match="Using 'time' assumes an Earth-based observer"):
rot_map = differential_rotate(aia171_test_map, time=2*u.day)
return rot_map.data
|
# The observer and the time cannot both be None
|
server.go
|
package server
import (
"encoding/json"
"errors"
"log"
"net"
"sync"
"time"
)
const (
RCV_BUFFER_SIZE = 4096
SND_BUFFER_SIZE = 4096
)
type ServerConfig struct {
Host string
MaxGame int
MaxPlayerPerGame int
}
type Server struct {
sync.Mutex
Config ServerConfig
Games GameList
}
func New(config ServerConfig) *Server {
return &Server{
Config: config,
}
}
func (s *Server) Listen() {
listener, err := net.Listen("tcp", s.Config.Host)
if err != nil {
log.Fatal(err.Error())
}
log.Println("Listening for connection on ", listener.Addr().String())
for {
conn, err := listener.Accept()
if err != nil {
log.Println(err.Error())
continue
}
go s.acceptConn(conn)
}
}
func (s *Server) acceptConn(conn net.Conn) {
buf := make([]byte, RCV_BUFFER_SIZE)
numBytes, err := conn.Read(buf)
if err != nil {
// We won't accept this incoming connection
log.Println(err.Error())
conn.Close()
return
}
r := StandardRequest{}
if err = json.Unmarshal(buf[:numBytes], &r); err != nil {
conn.Write(getErrorResponse(err.Error()))
conn.Close()
return
}
var reply []byte
closeConn := false
switch r.Request {
case RequestListGame:
reply = getListGameResponse(s.listGame())
closeConn = true
case RequestCreateGame:
gameId, err := s.newGame(r, conn)
if err != nil {
reply = getErrorResponse(err.Error())
closeConn = true
} else {
reply = getGameCreatedResponse(gameId)
}
case RequestJoinGame:
err = s.joinGame(r, conn)
if err != nil {
reply = getErrorResponse(err.Error())
closeConn = true
} else {
reply = getOkResponse()
}
default:
reply = getErrorResponse("invalid request")
closeConn = true
}
conn.Write(reply)
if closeConn {
conn.Close()
}
}
func (s *Server) newGame(req StandardRequest, conn net.Conn) (int, error) {
if len(s.Games) >= s.Config.MaxGame {
return 0, errors.New("Server is full")
}
gn, found := req.Param["name"]
if !found {
return 0, errors.New("Missing parameter: name")
}
gameName, ok := gn.(string)
if !ok
|
bn, found := req.Param["botName"]
if !found {
return 0, errors.New("Missing parameter: botName")
}
botName, ok := bn.(string)
if !ok {
return 0, errors.New("Invalid parameter: botName must be a string")
}
width := BOARD_DEFAULT_WIDTH
height := BOARD_DEFAULT_HEIGHT
if wi, found := req.Param["width"]; found {
w, ok := wi.(float64)
if !ok {
return 0, errors.New("Invalid parameter: width must be numeric")
}
width = int(w)
}
if hi, found := req.Param["height"]; found {
h, ok := hi.(float64)
if !ok {
return 0, errors.New("Invalid parameter: height must be numeric")
}
height = int(h)
}
s.Lock()
defer s.Unlock()
g := NewGame(gameName, width, height)
p := NewPlayer(botName, conn)
g.Players = append(g.Players, p)
g.Board.Spawn(p.Bot)
s.Games = append(s.Games, g)
go func(g *Game) {
log.Printf("Game %s will start in %s", g.Name, GAME_DEFAULT_START_TIME)
time.Sleep(GAME_DEFAULT_START_TIME)
log.Printf("Game %s is starting", g.Name)
g.Start()
}(g)
return len(s.Games), nil
}
func (s *Server) listGame() []GameDescription {
var games []GameDescription
s.Lock()
defer s.Unlock()
for id, game := range s.Games {
if len(game.Players) >= s.Config.MaxPlayerPerGame {
continue
}
d := GameDescription{
Id: id + 1,
Name: game.Name,
}
for _, p := range game.Players {
d.Bots = append(d.Bots, p.Bot.Name)
}
games = append(games, d)
}
return games
}
func (s *Server) joinGame(req StandardRequest, conn net.Conn) error {
id, found := req.Param["gameId"]
if !found {
return errors.New("Missing parameter: gameId")
}
gameId, ok := id.(float64)
if !ok {
return errors.New("Invalid parameter: gameId must be numeric")
}
bn, found := req.Param["botName"]
if !found {
return errors.New("Missing parameter: botName")
}
botName, ok := bn.(string)
if !ok {
errors.New("Invalid parameter: botName must be a string")
}
if gameId <= 0 {
return errors.New("Invalid parameter: gameId must be > 0")
}
game := s.Games[int(gameId)-1]
if game == nil {
return errors.New("Invalid parameter: game does not exist")
}
game.Lock()
defer game.Unlock()
if game.InProgress {
return errors.New("Game is already started")
}
if len(game.Players) >= s.Config.MaxPlayerPerGame {
return errors.New("Game is full")
}
p := NewPlayer(botName, conn)
game.Players = append(game.Players, p)
game.Board.Spawn(p.Bot)
return nil
}
|
{
return 0, errors.New("Invalid parameter: name must be a string")
}
|
rad-per-deg.js
|
module.exports = require("core-js-pure/features/math/rad-per-deg");
|
||
annotation_compare_viz.py
|
import json
from re import split
import shutil
import os
import sys
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from skimage import io
from shapely.geometry import Polygon
Image.MAX_IMAGE_PIXELS = None
def
|
(path):
if not os.path.exists(path):
os.makedirs(path)
else:
shutil.rmtree(path)
os.makedirs(path)
def dice(a, b):
return 2 * a.intersection(b).area / (a.area + b.area)
def recall(a, b):
return a.intersection(b).area / b.area
def precision(a, b):
return a.intersection(b).area / a.area
def find_diff(dice_thred=0.5, draw_preview=True, log_score=True):
# A - new json
with open(file_A_path) as data_file:
data = json.load(data_file)
average_area = sum(
[Polygon(item["geometry"]["coordinates"][0]).area for item in data]
) / len(data)
area_threshold = average_area / 50
print("average area size: ", average_area)
print("size threshold: ", area_threshold)
coor_list_a = []
for item in data:
coor = item["geometry"]["coordinates"]
poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_a.extend(item["geometry"]["coordinates"])
else:
print("A ignore", poly.area)
A_x_list = [[xy[0] for xy in coor] for coor in coor_list_a]
A_y_list = [[xy[1] for xy in coor] for coor in coor_list_a]
A_id_list = [i for i in range(len(coor_list_a))]
# B - old json
with open(file_B_path) as data_file:
data = json.load(data_file)
coor_list_b = []
for item in data:
coor = item["geometry"]["coordinates"]
coor = [
[[xy[1], xy[0]] for xy in coor[0]]
] # for some json. Comment this line if needed
poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_b.extend(coor)
else:
print("B ignore", poly.area)
B_x_list = [[xy[0] for xy in coor] for coor in coor_list_b]
B_y_list = [[xy[1] for xy in coor] for coor in coor_list_b]
# find difference
center_list_new = []
for i in range(len(A_x_list)):
mean_x = (sum(A_x_list[i]) - A_x_list[i][-1]) / (len(A_x_list[i]) - 1)
mean_y = (sum(A_y_list[i]) - A_y_list[i][-1]) / (len(A_y_list[i]) - 1)
center_list_new.append((mean_x, mean_y))
center_list_old = []
for i in range(len(B_x_list)):
mean_x = (sum(B_x_list[i]) - B_x_list[i][-1]) / (len(B_x_list[i]) - 1)
mean_y = (sum(B_y_list[i]) - B_y_list[i][-1]) / (len(B_y_list[i]) - 1)
center_list_old.append((mean_x, mean_y))
new_added_list = []
new_added_f1_list = []
new_same_list = []
new_revised_list = []
f1_list = []
positon_threshold = 500
dice_threshold = dice_thred
ignore_count = 0
for i in A_id_list:
x, y = center_list_new[i]
new_p = Polygon(coor_list_a[i])
min_f1 = 0
min_j = -1
_recall, _precision = -1, -1
for j in range(len(center_list_old)):
_x, _y = center_list_old[j]
old_p = Polygon(coor_list_b[j])
if (x - _x) ** 2 + (y - _y) ** 2 <= positon_threshold ** 2:
f1 = dice(new_p, old_p)
if f1 > min_f1:
min_f1 = f1
min_j = j
_recall = recall(new_p, old_p)
_precision = precision(new_p, old_p)
if min_f1 >= 0.999:
_flag = f"Same\t{min_f1}"
new_same_list.append(i)
elif min_f1 >= dice_threshold:
_flag = f"Revised\t{min_f1}"
new_revised_list.append(i)
f1_list.append((min_f1, _recall, _precision))
else:
_flag = f"Added\t{min_f1}"
new_added_list.append(i)
new_added_f1_list.append(min_f1)
# print(min_f1)
if _flag.startswith("Same") or _flag.startswith("Revised"):
if min_j != -1:
coor_list_b.pop(min_j)
center_list_old.pop(min_j)
# print(i, _flag)
removed_count = len(center_list_old)
print(f"A\tB\tsame\tmatch\tadded\tdeleted")
print(
f"{len(A_x_list)}\t{len(B_x_list)}\t{len(new_same_list)}\t{len(new_revised_list)}"
f"\t{len(new_added_list)}\t{removed_count}"
)
print(f"[FP: {len(new_added_list)}/{len(A_x_list)}]")
print(f"[FN: {removed_count}/{len(B_x_list)}]")
# print(f"{len(new_same_list)} same")
# print(f"{len(new_revised_list)} revised")
# print(f"{len(new_added_list)} added")
# print(f"{removed_count} deleted")
# draw visualization
if draw_preview:
ref_image = io.imread(image_ref_path)
background = np.zeros(shape=ref_image.shape, dtype=np.uint8)
img = Image.fromarray(background, "L")
img = img.convert("RGB")
font_path = r"c:\windows\fonts\bahnschrift.ttf"
font = ImageFont.truetype(font_path, size=48)
title_font = ImageFont.truetype(font_path, size=72)
ImageDraw.Draw(img).text(
(100, 400),
text=f"DICE Threshold = {dice_thred}",
font=title_font,
fill="white",
)
ImageDraw.Draw(img).text(
(100, 480),
text=f"PREDICTION [FP: {len(new_added_list)}/{len(A_x_list)}]",
font=title_font,
fill="yellow",
)
ImageDraw.Draw(img).text(
(100, 560),
text=f"GROUND TRUTH [FN: {removed_count}/{len(B_x_list)}]",
font=title_font,
fill="red",
)
for i in new_added_list:
coor_tuple = [(xy[1], xy[0]) for xy in coor_list_a[i]]
# print(coor_tuple)
ImageDraw.Draw(img).line(coor_tuple, fill="yellow", width=6)
# text
f1 = new_added_f1_list[new_added_list.index(i)]
if f1 > 0:
text = "{:.3f}".format(f1) # + f",{Polygon(coor_list_a[i]).area}"
ImageDraw.Draw(img).text(
(center_list_new[i][1] - 40, center_list_new[i][0] + 60),
text,
font=font,
)
for coor_b in coor_list_b:
coor_tuple = [(xy[1], xy[0]) for xy in coor_b]
# print(coor_tuple)
ImageDraw.Draw(img).line(coor_tuple, fill="red", width=6)
# text = f",{Polygon(coor_b).area}"
# ImageDraw.Draw(img).text(
# (coor_tuple[0][0], coor_tuple[0][1]),
# text,
# font=font,
# )
img = np.array(img).astype("uint8")
output_path = image_ref_path.replace(
".png", f'_{str(dice_thred).replace(".","_")}.png'
)
io.imsave(output_path, img)
print(f"Image saved to {output_path}")
# write score
if log_score:
txt_path = file_A_path.replace("json", "txt")
with open(txt_path, "w") as f:
for item in f1_list:
f.write(f"{item[0]},{item[1]},{item[2]}\n")
if __name__ == "__main__":
file_A_path = (
r"C:\Users\yiju\Desktop\Copy\Scripts\masks\1-tom-new-kidney\pred_00a67c839.json"
)
file_B_path = r"C:\Users\yiju\Desktop\Copy\Data\hubmap-kidney-segmentation\test\00a67c839.json"
if len(sys.argv) >= 3:
file_A_path = sys.argv[1]
file_B_path = sys.argv[2]
image_ref_path = file_A_path.replace("json", "png")
A_name = file_A_path.split("\\")[-1].split(".")[0]
B_name = file_B_path.split("\\")[-1].split(".")[0]
print("A: ", A_name)
print("B: ", B_name)
for d in [0.5]: # [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
find_diff(dice_thred=d, draw_preview=True, log_score=True)
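The script above relies on dice(), recall() and precision() helpers that are not shown in this excerpt. A minimal sketch of what they could look like, assuming the usual area-overlap definitions over shapely polygons (Dice = 2*|A∩B| / (|A| + |B|), precision = |A∩B| / |A|, recall = |A∩B| / |B|); the original implementation may differ:

def dice(pred_poly, gt_poly):
    # Dice / F1 overlap of two shapely polygons.
    inter = pred_poly.intersection(gt_poly).area
    denom = pred_poly.area + gt_poly.area
    return 2 * inter / denom if denom > 0 else 0.0

def precision(pred_poly, gt_poly):
    # Fraction of the predicted polygon covered by the ground-truth polygon.
    return pred_poly.intersection(gt_poly).area / pred_poly.area if pred_poly.area > 0 else 0.0

def recall(pred_poly, gt_poly):
    # Fraction of the ground-truth polygon covered by the predicted polygon.
    return pred_poly.intersection(gt_poly).area / gt_poly.area if gt_poly.area > 0 else 0.0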
|
make_dir
|
lc190.rs
|
/// Reverse bits
/// Reverse the bits of a given 32-bit unsigned integer.
pub fn reverse_bits(x: u32) -> u32 {
let mut res = 0;
let mut n = x;
let mut count = 31;
while n > 0 {
        res += (n & 1) << count; // take the lowest bit of n and shift it into its mirrored position
        n >>= 1; // shift right by one to drop the bit just handled
count -= 1;
}
res
}
|
}
|
#[test]
fn test_reverse_bits() {
    assert_eq!(reverse_bits(43261596), 964176192);
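Note: Rust's standard library also provides an equivalent built-in, u32::reverse_bits, so the hand-rolled version above can be cross-checked with assert_eq!(reverse_bits(x), x.reverse_bits()) for arbitrary inputs.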
|
stack_allocator.rs
|
use super::paging::{self, Page, PageIter, ActivePageTable};
use super::{PAGE_SIZE, FrameAllocator};
pub struct StackAllocator {
range: PageIter,
}
impl StackAllocator {
pub fn new(page_range: PageIter) -> StackAllocator {
StackAllocator { range: page_range }
}
}
impl StackAllocator {
pub fn alloc_stack<FA>(&mut self, active_table: &mut ActivePageTable,
frame_allocator: &mut FA, size_in_pages: usize,)
-> Option<Stack> where FA: FrameAllocator{
if size_in_pages == 0 {
return None; /* a zero sized stack makes no sense */
}
// clone the range, since we only want to change it on success
let mut range = self.range.clone();
// try to allocate the stack pages and a guard page
let guard_page = range.next();
let stack_start = range.next();
let stack_end = if size_in_pages == 1 {
stack_start
} else {
// choose the (size_in_pages-2)th element, since index
// starts at 0 and we already allocated the start page
range.nth(size_in_pages - 2)
};
match (guard_page, stack_start, stack_end) {
(Some(_), Some(start), Some(end)) => {
// success! write back updated range
self.range = range;
// map stack pages to physical frames
for page in Page::range_inclusive(start, end) {
active_table.map(page, paging::WRITABLE, frame_allocator);
}
// create a new stack
let top_of_stack = end.start_address() + PAGE_SIZE;
Some(Stack::new(top_of_stack, start.start_address()))
}
_ => None, /* not enough pages */
}
}
}
#[derive(Debug)]
pub struct Stack {
top: usize,
bottom: usize,
}
impl Stack {
fn new(top: usize, bottom: usize) -> Stack {
assert!(top > bottom);
Stack {
top: top,
bottom: bottom,
}
}
pub fn top(&self) -> usize {
self.top
}
#[allow(dead_code)]
pub fn bottom(&self) -> usize
|
}
|
{
self.bottom
}
|
main.go
|
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
package main
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"math"
"os"
"os/signal"
"runtime"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/pkg/errors"
"github.com/znbasedb/znbase/pkg/acceptance/localcluster"
"github.com/znbasedb/znbase/pkg/acceptance/localcluster/tc"
"github.com/znbasedb/znbase/pkg/cli"
"github.com/znbasedb/znbase/pkg/server/serverpb"
"github.com/znbasedb/znbase/pkg/storage"
"github.com/znbasedb/znbase/pkg/util/log"
"github.com/znbasedb/znbase/pkg/util/randutil"
"github.com/znbasedb/znbase/pkg/util/syncutil"
"github.com/znbasedb/znbase/pkg/util/timeutil"
)
var workers = flag.Int("w", 1, "number of workers; the i'th worker talks to node i%numNodes")
var numNodes = flag.Int("n", 4, "number of nodes")
var duration = flag.Duration("duration", math.MaxInt64, "how long to run the simulation for")
var blockSize = flag.Int("b", 1000, "block size")
var configFile = flag.String("f", "", "config file that specifies an allocsim workload (overrides -n)")
// Configuration provides a way to configure allocsim via a JSON file.
// TODO(a-robinson): Consider moving all the above options into the config file.
type Configuration struct {
NumWorkers int `json:"NumWorkers"`
Localities []Locality `json:"Localities"`
}
// Locality defines the properties of a single locality as part of a Configuration.
type Locality struct {
Name string `json:"Name"`
LocalityStr string `json:"LocalityStr"`
NumNodes int `json:"NumNodes"`
NumWorkers int `json:"NumWorkers"`
OutgoingLatencies []*struct {
Name string `json:"Name"`
Latency jsonDuration `json:"Latency"`
} `json:"OutgoingLatencies"`
}
type jsonDuration time.Duration
func (j *jsonDuration) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
dur, err := time.ParseDuration(s)
if err != nil {
return err
}
*j = jsonDuration(dur)
return nil
}
func loadConfig(file string) (Configuration, error)
|
// allocSim allows investigation of allocation/rebalancing heuristics. A
// pool of workers generates block_writer-style load where the i'th worker
// talks to node i%numNodes. Every second a monitor goroutine outputs status
// such as the per-node replica and leaseholder counts.
//
// TODO(peter/a-robinson): Allow configuration of zone-config constraints.
type allocSim struct {
*localcluster.Cluster
stats struct {
ops uint64
totalLatencyNanos uint64
errors uint64
}
ranges struct {
syncutil.Mutex
stats allocStats
}
localities []Locality
}
type allocStats struct {
count int
replicas []int
leases []int
replicaAdds []int
leaseTransfers []int
}
func newAllocSim(c *localcluster.Cluster) *allocSim {
return &allocSim{
Cluster: c,
}
}
func (a *allocSim) run(workers int) {
a.setup()
for i := 0; i < workers; i++ {
go a.roundRobinWorker(i, workers)
}
go a.rangeStats(time.Second)
a.monitor(time.Second)
}
func (a *allocSim) runWithConfig(config Configuration) {
a.setup()
numWorkers := config.NumWorkers
for _, locality := range config.Localities {
numWorkers += locality.NumWorkers
}
firstNodeInLocality := 0
for _, locality := range config.Localities {
for i := 0; i < locality.NumWorkers; i++ {
node := firstNodeInLocality + (i % locality.NumNodes)
startNum := firstNodeInLocality + i
go a.worker(node, startNum, numWorkers)
}
firstNodeInLocality += locality.NumNodes
}
for i := 0; i < config.NumWorkers; i++ {
go a.roundRobinWorker(firstNodeInLocality+i, numWorkers)
}
go a.rangeStats(time.Second)
a.monitor(time.Second)
}
func (a *allocSim) setup() {
db := a.Nodes[0].DB()
if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS allocsim"); err != nil {
log.Fatal(context.Background(), err)
}
blocks := `
CREATE TABLE IF NOT EXISTS blocks (
id INT NOT NULL,
num INT NOT NULL,
data BYTES NOT NULL,
PRIMARY KEY (id, num)
)
`
if _, err := db.Exec(blocks); err != nil {
log.Fatal(context.Background(), err)
}
}
func (a *allocSim) maybeLogError(err error) {
if localcluster.IsUnavailableError(err) {
return
}
log.Error(context.Background(), err)
atomic.AddUint64(&a.stats.errors, 1)
}
const insertStmt = `INSERT INTO allocsim.blocks (id, num, data) VALUES ($1, $2, repeat('a', $3)::bytes)`
func (a *allocSim) worker(dbIdx, startNum, workers int) {
r, _ := randutil.NewPseudoRand()
db := a.Nodes[dbIdx%len(a.Nodes)].DB()
for num := startNum; true; num += workers {
now := timeutil.Now()
if _, err := db.Exec(insertStmt, r.Int63(), num, *blockSize); err != nil {
a.maybeLogError(err)
} else {
atomic.AddUint64(&a.stats.ops, 1)
atomic.AddUint64(&a.stats.totalLatencyNanos, uint64(timeutil.Since(now).Nanoseconds()))
}
}
}
func (a *allocSim) roundRobinWorker(startNum, workers int) {
r, _ := randutil.NewPseudoRand()
for i := 0; ; i++ {
now := timeutil.Now()
db := a.Nodes[i%len(a.Nodes)].DB()
if db == nil {
continue // nodes are shutting down
}
if _, err := db.Exec(insertStmt, r.Int63(), startNum+i*workers, *blockSize); err != nil {
a.maybeLogError(err)
} else {
atomic.AddUint64(&a.stats.ops, 1)
atomic.AddUint64(&a.stats.totalLatencyNanos, uint64(timeutil.Since(now).Nanoseconds()))
}
}
}
func (a *allocSim) rangeInfo() allocStats {
stats := allocStats{
replicas: make([]int, len(a.Nodes)),
replicaAdds: make([]int, len(a.Nodes)),
leases: make([]int, len(a.Nodes)),
leaseTransfers: make([]int, len(a.Nodes)),
}
// Retrieve the metrics for each node and extract the replica and leaseholder
// counts.
var wg sync.WaitGroup
wg.Add(len(a.Nodes))
for i := 0; i < len(a.Nodes); i++ {
go func(i int) {
defer wg.Done()
status := a.Nodes[i].StatusClient()
if status == nil {
// Cluster is shutting down.
return
}
resp, err := status.Metrics(context.Background(), &serverpb.MetricsRequest{
NodeId: fmt.Sprintf("local"),
})
if err != nil {
log.Fatal(context.Background(), err)
}
var metrics map[string]interface{}
if err := json.Unmarshal(resp.Data, &metrics); err != nil {
log.Fatal(context.Background(), err)
}
stores := metrics["stores"].(map[string]interface{})
for _, v := range stores {
storeMetrics := v.(map[string]interface{})
if v, ok := storeMetrics["replicas"]; ok {
stats.replicas[i] += int(v.(float64))
}
if v, ok := storeMetrics["replicas.leaseholders"]; ok {
stats.leases[i] += int(v.(float64))
}
if v, ok := storeMetrics["range.adds"]; ok {
stats.replicaAdds[i] += int(v.(float64))
}
if v, ok := storeMetrics["leases.transfers.success"]; ok {
stats.leaseTransfers[i] += int(v.(float64))
}
}
}(i)
}
wg.Wait()
for _, v := range stats.replicas {
stats.count += v
}
return stats
}
func (a *allocSim) rangeStats(d time.Duration) {
for {
stats := a.rangeInfo()
a.ranges.Lock()
a.ranges.stats = stats
a.ranges.Unlock()
time.Sleep(d)
}
}
const padding = "__________________"
func formatHeader(header string, numberNodes int, localities []Locality) string {
var buf bytes.Buffer
_, _ = buf.WriteString(header)
for i := 1; i <= numberNodes; i++ {
node := fmt.Sprintf("%d", i)
if localities != nil {
node += fmt.Sprintf(":%s", localities[i-1].Name)
}
fmt.Fprintf(&buf, "%s%s", padding[:len(padding)-len(node)], node)
}
return buf.String()
}
func (a *allocSim) monitor(d time.Duration) {
formatNodes := func(stats allocStats) string {
var buf bytes.Buffer
for i := range stats.replicas {
alive := a.Nodes[i].Alive()
if !alive {
_, _ = buf.WriteString("\033[0;31;49m")
}
fmt.Fprintf(&buf, "%*s", len(padding), fmt.Sprintf("%d/%d/%d/%d",
stats.replicas[i], stats.leases[i], stats.replicaAdds[i], stats.leaseTransfers[i]))
if !alive {
_, _ = buf.WriteString("\033[0m")
}
}
return buf.String()
}
start := timeutil.Now()
lastTime := start
var numReplicas int
var lastOps uint64
for ticks := 0; true; ticks++ {
time.Sleep(d)
now := timeutil.Now()
elapsed := now.Sub(lastTime).Seconds()
ops := atomic.LoadUint64(&a.stats.ops)
totalLatencyNanos := atomic.LoadUint64(&a.stats.totalLatencyNanos)
a.ranges.Lock()
rangeStats := a.ranges.stats
a.ranges.Unlock()
if ticks%20 == 0 || numReplicas != len(rangeStats.replicas) {
numReplicas = len(rangeStats.replicas)
fmt.Println(formatHeader("_elapsed__ops/sec__average__latency___errors_replicas", numReplicas, a.localities))
}
var avgLatency float64
if ops > 0 {
avgLatency = float64(totalLatencyNanos/ops) / float64(time.Millisecond)
}
fmt.Printf("%8s %8.1f %8.1f %6.1fms %8d %8d%s\n",
time.Duration(now.Sub(start).Seconds()+0.5)*time.Second,
float64(ops-lastOps)/elapsed, float64(ops)/now.Sub(start).Seconds(), avgLatency,
atomic.LoadUint64(&a.stats.errors), rangeStats.count, formatNodes(rangeStats))
lastTime = now
lastOps = ops
}
}
func (a *allocSim) finalStatus() {
a.ranges.Lock()
defer a.ranges.Unlock()
// TODO(bram): With the addition of localities, these stats will have to be
// updated.
fmt.Println(formatHeader("___stats___________________________", len(a.ranges.stats.replicas), a.localities))
genStats := func(name string, counts []int) {
var total float64
for _, count := range counts {
total += float64(count)
}
mean := total / float64(len(counts))
var buf bytes.Buffer
fmt.Fprintf(&buf, "%8s (total%% / diff%%) ", name)
for _, count := range counts {
var percent, fromMean float64
if total != 0 {
percent = float64(count) / total * 100
fromMean = (float64(count) - mean) / total * 100
}
fmt.Fprintf(&buf, " %9.9s", fmt.Sprintf("%.0f/%.0f", percent, fromMean))
}
fmt.Println(buf.String())
}
genStats("replicas", a.ranges.stats.replicas)
genStats("leases", a.ranges.stats.leases)
}
func handleStart() bool {
if len(os.Args) < 2 || os.Args[1] != "start" {
return false
}
// Speed up lease transfer decisions by not requiring quite as much data
// before beginning to make them. Without this, the rapid splitting of ranges
// in the few minutes after allocsim starts up causes it to take a long time
// for leases to settle onto other nodes even when requests are skewed heavily
// onto them.
storage.MinLeaseTransferStatsDuration = 10 * time.Second
cli.Main()
return true
}
func main() {
if handleStart() {
return
}
flag.Parse()
var config Configuration
if *configFile != "" {
var err error
config, err = loadConfig(*configFile)
if err != nil {
log.Fatal(context.Background(), err)
}
}
perNodeCfg := localcluster.MakePerNodeFixedPortsCfg(*numNodes)
// TODO(a-robinson): Automatically run github.com/tylertreat/comcast for
// simpler configs that just have a single latency between all nodes.
var separateAddrs bool
for _, locality := range config.Localities {
if len(locality.OutgoingLatencies) != 0 {
separateAddrs = true
if runtime.GOOS != "linux" {
log.Fatal(context.Background(),
"configs that set per-locality outgoing latencies are only supported on linux")
}
break
}
}
if separateAddrs {
for i := range perNodeCfg {
s := perNodeCfg[i]
s.Addr = fmt.Sprintf("127.0.0.%d", i)
perNodeCfg[i] = s
}
}
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
localities := make([]Locality, *numNodes)
if len(config.Localities) != 0 {
nodesPerLocality := make(map[string][]int)
var nodeIdx int
for _, locality := range config.Localities {
for i := 0; i < locality.NumNodes; i++ {
s := perNodeCfg[nodeIdx] // avoid map assignment problems
if locality.LocalityStr != "" {
s.ExtraArgs = []string{fmt.Sprintf("--locality=%s", locality.LocalityStr)}
} else {
s.ExtraArgs = []string{fmt.Sprintf("--locality=l=%s", locality.Name)}
}
if separateAddrs {
s.ExtraEnv = []string{fmt.Sprintf("ZNBASE_SOURCE_IP_ADDRESS=%s", s.Addr)}
}
localities[nodeIdx] = locality
nodesPerLocality[locality.Name] = append(nodesPerLocality[locality.Name], nodeIdx)
perNodeCfg[nodeIdx] = s
nodeIdx++
}
}
var tcController *tc.Controller
if separateAddrs {
// Since localcluster only uses loopback IPs for the nodes, we only need to
// set up tc rules on the loopback device.
tcController = tc.NewController("lo")
if err := tcController.Init(); err != nil {
log.Fatal(context.Background(), err)
}
defer func() {
if err := tcController.CleanUp(); err != nil {
log.Error(context.Background(), err)
}
}()
}
for _, locality := range localities {
for _, outgoing := range locality.OutgoingLatencies {
if outgoing.Latency > 0 {
for _, srcNodeIdx := range nodesPerLocality[locality.Name] {
for _, dstNodeIdx := range nodesPerLocality[outgoing.Name] {
if err := tcController.AddLatency(
perNodeCfg[srcNodeIdx].Addr, perNodeCfg[dstNodeIdx].Addr, time.Duration(outgoing.Latency/2),
); err != nil {
log.Fatal(context.Background(), err)
}
}
}
}
}
}
}
cfg := localcluster.ClusterConfig{
AllNodeArgs: append(flag.Args(), "--vmodule=allocator=3,allocator_scorer=3,replicate_queue=3"),
Binary: os.Args[0],
NumNodes: *numNodes,
DB: "allocsim",
NumWorkers: *workers,
PerNodeCfg: perNodeCfg,
DataDir: "znbase-data-allocsim",
}
c := localcluster.New(cfg)
a := newAllocSim(c)
a.localities = localities
log.SetExitFunc(false /* hideStack */, func(code int) {
c.Close()
os.Exit(code)
})
go func() {
var exitStatus int
select {
case s := <-signalCh:
log.Infof(context.Background(), "signal received: %v", s)
exitStatus = 1
case <-time.After(*duration):
log.Infof(context.Background(), "finished run of: %s", *duration)
}
c.Close()
a.finalStatus()
os.Exit(exitStatus)
}()
c.Start(context.Background())
defer c.Close()
c.UpdateZoneConfig(1, 1<<20)
_, err := c.Nodes[0].DB().Exec("SET CLUSTER SETTING kv.raft_log.disable_synchronization_unsafe = true")
if err != nil {
log.Fatal(context.Background(), err)
}
if len(config.Localities) != 0 {
a.runWithConfig(config)
} else {
a.run(*workers)
}
}
|
{
fileHandle, err := os.Open(file)
if err != nil {
return Configuration{}, errors.Wrapf(err, "failed to open config file %q", file)
}
defer fileHandle.Close()
var config Configuration
jsonParser := json.NewDecoder(fileHandle)
if err := jsonParser.Decode(&config); err != nil {
return Configuration{}, errors.Wrapf(err, "failed to decode %q as json", file)
}
*numNodes = 0
*workers = config.NumWorkers
for _, locality := range config.Localities {
*numNodes += locality.NumNodes
*workers += locality.NumWorkers
}
return config, nil
}
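For reference, the Configuration and Locality structs above imply a JSON config file shaped roughly as follows (field names come from the struct tags; the locality names, node counts and latency values are purely illustrative):

{
  "NumWorkers": 4,
  "Localities": [
    {
      "Name": "us-east",
      "LocalityStr": "region=us-east",
      "NumNodes": 3,
      "NumWorkers": 2,
      "OutgoingLatencies": [
        { "Name": "us-west", "Latency": "60ms" }
      ]
    },
    {
      "Name": "us-west",
      "LocalityStr": "region=us-west",
      "NumNodes": 3,
      "NumWorkers": 2,
      "OutgoingLatencies": [
        { "Name": "us-east", "Latency": "60ms" }
      ]
    }
  ]
}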
|
model_serialize_test.py
|
# third party
import numpy as np
import pytest
from sklearn.linear_model import LogisticRegression
# syft absolute
import syft as sy
from syft.experimental_flags import flags
sy.load("sklearn")
sy.load("numpy")
@pytest.mark.vendor(lib="sklearn")
@pytest.mark.parametrize("arrow_backend", [True, False])
def test_logistic_model_serde(
arrow_backend: bool, root_client: sy.VirtualMachineClient
) -> None:
|
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([0, 0, 1, 1])
clf = LogisticRegression(random_state=0).fit(X, y)
clf_remote = clf.send(root_client)
clf_2 = clf_remote.get()
dict_1 = vars(clf)
dict_2 = vars(clf_2)
for key in dict_1.keys():
if type(dict_1[key]) == float:
assert abs(dict_1[key] - dict_2[key]) < 0.0001
        elif type(dict_1[key]) == np.ndarray:
            # Compare element-wise; ``a.all() == b.all()`` only compares the truthiness of each array.
            assert np.allclose(dict_1[key], dict_2[key])
else:
assert dict_1[key] == dict_2[key]
|
|
selectcopy.js
|
// selects or copies the targeted item
// https://codepen.io/shaikmaqsood/pen/XmydxJ/
function copyToClipboard(target) {
var $temp = $("<input>");
$("body").append($temp);
var text = ($(target).text() || $(target).val());
$temp.val(text).select();
document.execCommand("copy");
$temp.remove();
}
function
|
(target) {
$(target).select();
}
$(document).ready(function() {
$('._target-copy').on('click', function() {
var target = $(this).attr('data-target');
copyToClipboard(target);
})
$('._target-select').on('click', function() {
var target = $(this).attr('data-target');
select(target);
})
});
|
select
|
JobStatusForMonitoring.py
|
#!/usr/bin/env python
"""
_JobStatusMonitoring_
MySQL implementation for loading a job by scheduler status
"""
from WMCore.Database.DBFormatter import DBFormatter
class JobStatusForMonitoring(DBFormatter):
"""
_LoadForMonitoring_
Load all jobs with a certain scheduler status including
all the joined information.
"""
sql = """SELECT STRAIGHT_JOIN wwf.name as workflow, count(rj.wmbs_id) AS num_jobs,
st.name AS status, wl.plugin AS plugin, wu.cert_dn AS owner
FROM bl_runjob rj
LEFT OUTER JOIN wmbs_users wu ON wu.id = rj.user_id
INNER JOIN bl_status st ON rj.sched_status = st.id
INNER JOIN wmbs_job wj ON wj.id = rj.wmbs_id
|
INNER JOIN wmbs_subscription ws ON ws.id = wjg.subscription
INNER JOIN wmbs_workflow wwf ON wwf.id = ws.workflow
LEFT OUTER JOIN wmbs_location wl ON wl.id = wj.location
WHERE rj.status = :complete
GROUP BY wwf.name, plugin, st.name
"""
def mappedStatusFormat(self, results):
"""
        Convert each individual batch-system plugin status to a common status.
        Warning: this assumes all plugins live under WMCore/BossAir/Plugins/
        and that each module name matches its class name.
        __import__ does not re-import a module that already exists, so there is
        no need to track which plugins have been imported; the performance
        difference should be small. If desired, a cache of imported plugins
        could be maintained instead.
"""
commonStates = {}
for data in results:
module = __import__("WMCore.BossAir.Plugins.%s" % data['plugin'],
globals(), locals(), [data['plugin']])
plugIn = getattr(module, data['plugin'])
state = plugIn.stateMap().get(data['status'])
if data['workflow'] not in commonStates:
commonStates[data['workflow']] = {}
commonStates[data['workflow']].setdefault(state, 0)
commonStates[data['workflow']][state] += data['num_jobs']
results = []
for key, value in commonStates.items():
reformedData = {'request_name': key}
reformedData.update(value)
results.append(reformedData)
return results
def execute(self, commonFormat = True, conn = None, transaction = False):
"""
_execute_
Load all jobs either running or not (running by default)
"""
complete = '1'
binds = {'complete': complete}
result = self.dbi.processData(self.sql, binds, conn = conn,
transaction = transaction)
if commonFormat:
return self.mappedStatusFormat(self.formatDict(result))
else:
return self.formatDict(result)
|
INNER JOIN wmbs_jobgroup wjg ON wjg.id = wj.jobgroup
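mappedStatusFormat above expects every plugin module under WMCore/BossAir/Plugins/ to expose a class with the same name as the module, providing a stateMap() that translates scheduler-specific states into common ones. A hypothetical stub showing the assumed contract (class name and state names are illustrative, not taken from WMCore):

class ExamplePlugin(object):
    """Hypothetical BossAir-style plugin stub illustrating the interface assumed above."""

    @staticmethod
    def stateMap():
        # Scheduler-specific state -> common monitoring state.
        return {"New": "Pending", "Idle": "Pending", "Running": "Running",
                "Removed": "Error", "Done": "Complete"}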
|
categorical.py
|
import torch
from ptstat.core import RandomVariable, _to_v
class Categorical(RandomVariable):
"""
Categorical over 0,...,N-1 with arbitrary probabilities, 1-dimensional rv, long type.
"""
def __init__(self, p=None, p_min=1E-6, size=None, cuda=False):
super(Categorical, self).__init__()
if size:
assert len(size) == 2, str(size)
p = _to_v(1 / size[1], size, cuda)
else:
assert len(p.size()) == 2, str(p.size())
assert torch.min(p.data) >= 0, str(torch.min(p.data))
assert torch.max(torch.abs(torch.sum(p.data, 1) - 1)) <= 1E-5
self._p = torch.clamp(p, p_min)
def _size(self):
return self._p.size()[0], 1 # Type is Long.
def _log_pdf(self, x):
return torch.log(self._p.gather(1, x)).squeeze()
def _sample(self):
|
def _entropy(self):
return - torch.sum(self._p * torch.log(self._p), 1).squeeze()
|
return self._p.multinomial(1, True)
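A short usage sketch for the class above, assuming the ptstat RandomVariable base class exposes public sample()/log_pdf()/entropy() wrappers around the underscore methods shown here (an assumption about the library's API, not confirmed by this excerpt):

# Uniform categorical over K = 5 classes for a batch of 3 samples.
rv = Categorical(size=(3, 5))
x = rv.sample()        # assumed wrapper around _sample(): LongTensor of shape (3, 1)
logp = rv.log_pdf(x)   # assumed wrapper around _log_pdf(): per-sample log-probabilities
h = rv.entropy()       # assumed wrapper around _entropy(): log(5) for the uniform case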
|
test_feedexport.py
|
import csv
import json
import os
import random
import shutil
import string
import tempfile
import warnings
from io import BytesIO
from logging import getLogger
from pathlib import Path
from string import ascii_letters, digits
from unittest import mock
from urllib.parse import urljoin, urlparse, quote
from urllib.request import pathname2url
import lxml.etree
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial import unittest
from w3lib.url import file_uri_to_path, path_to_file_uri
from zope.interface import implementer
from zope.interface.verify import verifyObject
import scrapy
from scrapy.crawler import CrawlerRunner
from scrapy.exporters import CsvItemExporter
from scrapy.extensions.feedexport import (BlockingFeedStorage, FileFeedStorage, FTPFeedStorage,
IFeedStorage, S3FeedStorage, StdoutFeedStorage)
from scrapy.settings import Settings
from scrapy.utils.python import to_unicode
from scrapy.utils.test import assert_aws_environ, get_crawler, get_s3_content_and_delete
from tests.mockserver import MockServer
class FileFeedStorageTest(unittest.TestCase):
def test_store_file_uri(self):
path = os.path.abspath(self.mktemp())
uri = path_to_file_uri(path)
return self._assert_stores(FileFeedStorage(uri), path)
def test_store_file_uri_makedirs(self):
path = os.path.abspath(self.mktemp())
path = os.path.join(path, 'more', 'paths', 'file.txt')
uri = path_to_file_uri(path)
return self._assert_stores(FileFeedStorage(uri), path)
def test_store_direct_path(self):
path = os.path.abspath(self.mktemp())
return self._assert_stores(FileFeedStorage(path), path)
def test_store_direct_path_relative(self):
path = self.mktemp()
return self._assert_stores(FileFeedStorage(path), path)
def test_interface(self):
path = self.mktemp()
st = FileFeedStorage(path)
verifyObject(IFeedStorage, st)
@defer.inlineCallbacks
def _assert_stores(self, storage, path):
spider = scrapy.Spider("default")
file = storage.open(spider)
file.write(b"content")
yield storage.store(file)
self.assertTrue(os.path.exists(path))
try:
with open(path, 'rb') as fp:
self.assertEqual(fp.read(), b"content")
finally:
os.unlink(path)
class FTPFeedStorageTest(unittest.TestCase):
def get_test_spider(self, settings=None):
class TestSpider(scrapy.Spider):
name = 'test_spider'
crawler = get_crawler(settings_dict=settings)
spider = TestSpider.from_crawler(crawler)
return spider
def test_store(self):
uri = os.environ.get('FEEDTEST_FTP_URI')
path = os.environ.get('FEEDTEST_FTP_PATH')
if not (uri and path):
raise unittest.SkipTest("No FTP server available for testing")
st = FTPFeedStorage(uri)
verifyObject(IFeedStorage, st)
return self._assert_stores(st, path)
def test_store_active_mode(self):
uri = os.environ.get('FEEDTEST_FTP_URI')
path = os.environ.get('FEEDTEST_FTP_PATH')
if not (uri and path):
raise unittest.SkipTest("No FTP server available for testing")
use_active_mode = {'FEED_STORAGE_FTP_ACTIVE': True}
crawler = get_crawler(settings_dict=use_active_mode)
st = FTPFeedStorage.from_crawler(crawler, uri)
verifyObject(IFeedStorage, st)
return self._assert_stores(st, path)
def test_uri_auth_quote(self):
# RFC3986: 3.2.1. User Information
pw_quoted = quote(string.punctuation, safe='')
st = FTPFeedStorage('ftp://foo:%[email protected]/some_path' % pw_quoted)
self.assertEqual(st.password, string.punctuation)
@defer.inlineCallbacks
def _assert_stores(self, storage, path):
spider = self.get_test_spider()
file = storage.open(spider)
file.write(b"content")
yield storage.store(file)
self.assertTrue(os.path.exists(path))
try:
with open(path, 'rb') as fp:
self.assertEqual(fp.read(), b"content")
# again, to check s3 objects are overwritten
yield storage.store(BytesIO(b"new content"))
with open(path, 'rb') as fp:
self.assertEqual(fp.read(), b"new content")
finally:
os.unlink(path)
class BlockingFeedStorageTest(unittest.TestCase):
def get_test_spider(self, settings=None):
class TestSpider(scrapy.Spider):
name = 'test_spider'
crawler = get_crawler(settings_dict=settings)
spider = TestSpider.from_crawler(crawler)
return spider
def test_default_temp_dir(self):
b = BlockingFeedStorage()
tmp = b.open(self.get_test_spider())
tmp_path = os.path.dirname(tmp.name)
self.assertEqual(tmp_path, tempfile.gettempdir())
def test_temp_file(self):
b = BlockingFeedStorage()
tests_path = os.path.dirname(os.path.abspath(__file__))
spider = self.get_test_spider({'FEED_TEMPDIR': tests_path})
tmp = b.open(spider)
tmp_path = os.path.dirname(tmp.name)
self.assertEqual(tmp_path, tests_path)
def test_invalid_folder(self):
b = BlockingFeedStorage()
tests_path = os.path.dirname(os.path.abspath(__file__))
invalid_path = os.path.join(tests_path, 'invalid_path')
spider = self.get_test_spider({'FEED_TEMPDIR': invalid_path})
self.assertRaises(OSError, b.open, spider=spider)
class S3FeedStorageTest(unittest.TestCase):
@mock.patch('scrapy.utils.project.get_project_settings',
new=mock.MagicMock(return_value={'AWS_ACCESS_KEY_ID': 'conf_key',
'AWS_SECRET_ACCESS_KEY': 'conf_secret'}),
create=True)
def test_parse_credentials(self):
try:
import boto # noqa: F401
except ImportError:
raise unittest.SkipTest("S3FeedStorage requires boto")
aws_credentials = {'AWS_ACCESS_KEY_ID': 'settings_key',
'AWS_SECRET_ACCESS_KEY': 'settings_secret'}
crawler = get_crawler(settings_dict=aws_credentials)
# Instantiate with crawler
storage = S3FeedStorage.from_crawler(crawler,
's3://mybucket/export.csv')
self.assertEqual(storage.access_key, 'settings_key')
self.assertEqual(storage.secret_key, 'settings_secret')
# Instantiate directly
storage = S3FeedStorage('s3://mybucket/export.csv',
aws_credentials['AWS_ACCESS_KEY_ID'],
aws_credentials['AWS_SECRET_ACCESS_KEY'])
self.assertEqual(storage.access_key, 'settings_key')
self.assertEqual(storage.secret_key, 'settings_secret')
# URI priority > settings priority
storage = S3FeedStorage('s3://uri_key:uri_secret@mybucket/export.csv',
aws_credentials['AWS_ACCESS_KEY_ID'],
aws_credentials['AWS_SECRET_ACCESS_KEY'])
self.assertEqual(storage.access_key, 'uri_key')
self.assertEqual(storage.secret_key, 'uri_secret')
# Backward compatibility for initialising without settings
with warnings.catch_warnings(record=True) as w:
storage = S3FeedStorage('s3://mybucket/export.csv')
self.assertEqual(storage.access_key, 'conf_key')
self.assertEqual(storage.secret_key, 'conf_secret')
self.assertTrue('without AWS keys' in str(w[-1].message))
@defer.inlineCallbacks
def test_store(self):
assert_aws_environ()
uri = os.environ.get('S3_TEST_FILE_URI')
if not uri:
raise unittest.SkipTest("No S3 URI available for testing")
access_key = os.environ.get('AWS_ACCESS_KEY_ID')
secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
storage = S3FeedStorage(uri, access_key, secret_key)
verifyObject(IFeedStorage, storage)
file = storage.open(scrapy.Spider("default"))
expected_content = b"content: \xe2\x98\x83"
file.write(expected_content)
yield storage.store(file)
u = urlparse(uri)
content = get_s3_content_and_delete(u.hostname, u.path[1:])
self.assertEqual(content, expected_content)
def test_init_without_acl(self):
storage = S3FeedStorage(
's3://mybucket/export.csv',
'access_key',
'secret_key'
)
self.assertEqual(storage.access_key, 'access_key')
self.assertEqual(storage.secret_key, 'secret_key')
self.assertEqual(storage.acl, None)
def test_init_with_acl(self):
storage = S3FeedStorage(
's3://mybucket/export.csv',
'access_key',
'secret_key',
'custom-acl'
)
self.assertEqual(storage.access_key, 'access_key')
self.assertEqual(storage.secret_key, 'secret_key')
self.assertEqual(storage.acl, 'custom-acl')
def test_from_crawler_without_acl(self):
settings = {
'AWS_ACCESS_KEY_ID': 'access_key',
'AWS_SECRET_ACCESS_KEY': 'secret_key',
}
crawler = get_crawler(settings_dict=settings)
storage = S3FeedStorage.from_crawler(
crawler,
's3://mybucket/export.csv'
)
self.assertEqual(storage.access_key, 'access_key')
self.assertEqual(storage.secret_key, 'secret_key')
self.assertEqual(storage.acl, None)
def test_from_crawler_with_acl(self):
settings = {
'AWS_ACCESS_KEY_ID': 'access_key',
'AWS_SECRET_ACCESS_KEY': 'secret_key',
'FEED_STORAGE_S3_ACL': 'custom-acl',
}
crawler = get_crawler(settings_dict=settings)
storage = S3FeedStorage.from_crawler(
crawler,
's3://mybucket/export.csv'
)
self.assertEqual(storage.access_key, 'access_key')
self.assertEqual(storage.secret_key, 'secret_key')
self.assertEqual(storage.acl, 'custom-acl')
@defer.inlineCallbacks
def test_store_botocore_without_acl(self):
try:
import botocore # noqa: F401
except ImportError:
raise unittest.SkipTest('botocore is required')
storage = S3FeedStorage(
's3://mybucket/export.csv',
'access_key',
'secret_key',
)
self.assertEqual(storage.access_key, 'access_key')
self.assertEqual(storage.secret_key, 'secret_key')
self.assertEqual(storage.acl, None)
storage.s3_client = mock.MagicMock()
yield storage.store(BytesIO(b'test file'))
self.assertNotIn('ACL', storage.s3_client.put_object.call_args[1])
@defer.inlineCallbacks
def test_store_botocore_with_acl(self):
try:
import botocore # noqa: F401
except ImportError:
raise unittest.SkipTest('botocore is required')
storage = S3FeedStorage(
's3://mybucket/export.csv',
'access_key',
'secret_key',
'custom-acl'
)
self.assertEqual(storage.access_key, 'access_key')
self.assertEqual(storage.secret_key, 'secret_key')
self.assertEqual(storage.acl, 'custom-acl')
storage.s3_client = mock.MagicMock()
yield storage.store(BytesIO(b'test file'))
self.assertEqual(
storage.s3_client.put_object.call_args[1].get('ACL'),
'custom-acl'
)
@defer.inlineCallbacks
def test_store_not_botocore_without_acl(self):
storage = S3FeedStorage(
's3://mybucket/export.csv',
'access_key',
'secret_key',
)
self.assertEqual(storage.access_key, 'access_key')
self.assertEqual(storage.secret_key, 'secret_key')
self.assertEqual(storage.acl, None)
storage.is_botocore = False
storage.connect_s3 = mock.MagicMock()
self.assertFalse(storage.is_botocore)
yield storage.store(BytesIO(b'test file'))
conn = storage.connect_s3(*storage.connect_s3.call_args)
bucket = conn.get_bucket(*conn.get_bucket.call_args)
key = bucket.new_key(*bucket.new_key.call_args)
self.assertNotIn(
dict(policy='custom-acl'),
key.set_contents_from_file.call_args
)
@defer.inlineCallbacks
def test_store_not_botocore_with_acl(self):
storage = S3FeedStorage(
's3://mybucket/export.csv',
'access_key',
'secret_key',
'custom-acl'
)
self.assertEqual(storage.access_key, 'access_key')
self.assertEqual(storage.secret_key, 'secret_key')
self.assertEqual(storage.acl, 'custom-acl')
storage.is_botocore = False
storage.connect_s3 = mock.MagicMock()
self.assertFalse(storage.is_botocore)
yield storage.store(BytesIO(b'test file'))
conn = storage.connect_s3(*storage.connect_s3.call_args)
bucket = conn.get_bucket(*conn.get_bucket.call_args)
key = bucket.new_key(*bucket.new_key.call_args)
self.assertIn(
dict(policy='custom-acl'),
key.set_contents_from_file.call_args
)
class StdoutFeedStorageTest(unittest.TestCase):
@defer.inlineCallbacks
def test_store(self):
out = BytesIO()
storage = StdoutFeedStorage('stdout:', _stdout=out)
file = storage.open(scrapy.Spider("default"))
file.write(b"content")
yield storage.store(file)
self.assertEqual(out.getvalue(), b"content")
class FromCrawlerMixin:
init_with_crawler = False
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
cls.init_with_crawler = True
return cls(*args, **kwargs)
class FromCrawlerCsvItemExporter(CsvItemExporter, FromCrawlerMixin):
pass
class FromCrawlerFileFeedStorage(FileFeedStorage, FromCrawlerMixin):
pass
@implementer(IFeedStorage)
class LogOnStoreFileStorage:
"""
This storage logs inside `store` method.
It can be used to make sure `store` method is invoked.
"""
def __init__(self, uri):
self.path = file_uri_to_path(uri)
self.logger = getLogger()
def open(self, spider):
return tempfile.NamedTemporaryFile(prefix='feed-')
def store(self, file):
self.logger.info('Storage.store is called')
file.close()
class FeedExportTest(unittest.TestCase):
class MyItem(scrapy.Item):
foo = scrapy.Field()
egg = scrapy.Field()
baz = scrapy.Field()
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.temp_dir, ignore_errors=True)
def _random_temp_filename(self):
chars = [random.choice(ascii_letters + digits) for _ in range(15)]
filename = ''.join(chars)
return os.path.join(self.temp_dir, filename)
@defer.inlineCallbacks
def run_and_export(self, spider_cls, settings):
""" Run spider with specified settings; return exported data. """
FEEDS = settings.get('FEEDS') or {}
settings['FEEDS'] = {
urljoin('file:', pathname2url(str(file_path))): feed
for file_path, feed in FEEDS.items()
}
content = {}
try:
with MockServer() as s:
runner = CrawlerRunner(Settings(settings))
spider_cls.start_urls = [s.url('/')]
yield runner.crawl(spider_cls)
for file_path, feed in FEEDS.items():
if not os.path.exists(str(file_path)):
continue
with open(str(file_path), 'rb') as f:
content[feed['format']] = f.read()
finally:
for file_path in FEEDS.keys():
if not os.path.exists(str(file_path)):
continue
os.remove(str(file_path))
return content
@defer.inlineCallbacks
def exported_data(self, items, settings):
"""
Return exported data which a spider yielding ``items`` would return.
"""
class TestSpider(scrapy.Spider):
name = 'testspider'
def parse(self, response):
for item in items:
yield item
data = yield self.run_and_export(TestSpider, settings)
return data
@defer.inlineCallbacks
def exported_no_data(self, settings):
"""
Return exported data which a spider yielding no ``items`` would return.
"""
class TestSpider(scrapy.Spider):
name = 'testspider'
def parse(self, response):
pass
data = yield self.run_and_export(TestSpider, settings)
return data
@defer.inlineCallbacks
def assertExportedCsv(self, items, header, rows, settings=None, ordered=True):
settings = settings or {}
settings.update({
'FEEDS': {
self._random_temp_filename(): {'format': 'csv'},
},
})
data = yield self.exported_data(items, settings)
reader = csv.DictReader(to_unicode(data['csv']).splitlines())
got_rows = list(reader)
if ordered:
self.assertEqual(reader.fieldnames, header)
else:
self.assertEqual(set(reader.fieldnames), set(header))
self.assertEqual(rows, got_rows)
@defer.inlineCallbacks
def assertExportedJsonLines(self, items, rows, settings=None):
settings = settings or {}
settings.update({
'FEEDS': {
self._random_temp_filename(): {'format': 'jl'},
},
})
data = yield self.exported_data(items, settings)
parsed = [json.loads(to_unicode(line)) for line in data['jl'].splitlines()]
rows = [{k: v for k, v in row.items() if v} for row in rows]
self.assertEqual(rows, parsed)
@defer.inlineCallbacks
def assertExportedXml(self, items, rows, settings=None):
settings = settings or {}
settings.update({
'FEEDS': {
self._random_temp_filename(): {'format': 'xml'},
},
})
data = yield self.exported_data(items, settings)
rows = [{k: v for k, v in row.items() if v} for row in rows]
root = lxml.etree.fromstring(data['xml'])
got_rows = [{e.tag: e.text for e in it} for it in root.findall('item')]
self.assertEqual(rows, got_rows)
@defer.inlineCallbacks
def assertExportedMultiple(self, items, rows, settings=None):
settings = settings or {}
settings.update({
'FEEDS': {
self._random_temp_filename(): {'format': 'xml'},
self._random_temp_filename(): {'format': 'json'},
},
})
data = yield self.exported_data(items, settings)
rows = [{k: v for k, v in row.items() if v} for row in rows]
# XML
root = lxml.etree.fromstring(data['xml'])
xml_rows = [{e.tag: e.text for e in it} for it in root.findall('item')]
self.assertEqual(rows, xml_rows)
# JSON
json_rows = json.loads(to_unicode(data['json']))
self.assertEqual(rows, json_rows)
def _load_until_eof(self, data, load_func):
result = []
with tempfile.TemporaryFile() as temp:
temp.write(data)
temp.seek(0)
while True:
try:
result.append(load_func(temp))
except EOFError:
break
return result
@defer.inlineCallbacks
def assertExportedPickle(self, items, rows, settings=None):
settings = settings or {}
settings.update({
'FEEDS': {
self._random_temp_filename(): {'format': 'pickle'},
},
})
data = yield self.exported_data(items, settings)
expected = [{k: v for k, v in row.items() if v} for row in rows]
import pickle
result = self._load_until_eof(data['pickle'], load_func=pickle.load)
self.assertEqual(expected, result)
@defer.inlineCallbacks
def assertExportedMarshal(self, items, rows, settings=None):
settings = settings or {}
settings.update({
'FEEDS': {
self._random_temp_filename(): {'format': 'marshal'},
},
})
data = yield self.exported_data(items, settings)
expected = [{k: v for k, v in row.items() if v} for row in rows]
import marshal
result = self._load_until_eof(data['marshal'], load_func=marshal.load)
self.assertEqual(expected, result)
@defer.inlineCallbacks
def assertExported(self, items, header, rows, settings=None, ordered=True):
yield self.assertExportedCsv(items, header, rows, settings, ordered)
yield self.assertExportedJsonLines(items, rows, settings)
yield self.assertExportedXml(items, rows, settings)
yield self.assertExportedPickle(items, rows, settings)
yield self.assertExportedMarshal(items, rows, settings)
yield self.assertExportedMultiple(items, rows, settings)
@defer.inlineCallbacks
def test_export_items(self):
# feed exporters use field names from Item
items = [
self.MyItem({'foo': 'bar1', 'egg': 'spam1'}),
self.MyItem({'foo': 'bar2', 'egg': 'spam2', 'baz': 'quux2'}),
]
rows = [
{'egg': 'spam1', 'foo': 'bar1', 'baz': ''},
{'egg': 'spam2', 'foo': 'bar2', 'baz': 'quux2'}
]
header = self.MyItem.fields.keys()
yield self.assertExported(items, header, rows, ordered=False)
@defer.inlineCallbacks
def test_export_no_items_not_store_empty(self):
for fmt in ('json', 'jsonlines', 'xml', 'csv'):
settings = {
'FEEDS': {
self._random_temp_filename(): {'format': fmt},
},
}
data = yield self.exported_no_data(settings)
self.assertEqual(data[fmt], b'')
@defer.inlineCallbacks
def test_export_no_items_store_empty(self):
formats = (
('json', b'[]'),
('jsonlines', b''),
('xml', b'<?xml version="1.0" encoding="utf-8"?>\n<items></items>'),
('csv', b''),
)
for fmt, expctd in formats:
settings = {
'FEEDS': {
self._random_temp_filename(): {'format': fmt},
},
'FEED_STORE_EMPTY': True,
'FEED_EXPORT_INDENT': None,
}
data = yield self.exported_no_data(settings)
self.assertEqual(data[fmt], expctd)
@defer.inlineCallbacks
def test_export_no_items_multiple_feeds(self):
""" Make sure that `storage.store` is called for every feed. """
settings = {
'FEEDS': {
self._random_temp_filename(): {'format': 'json'},
self._random_temp_filename(): {'format': 'xml'},
self._random_temp_filename(): {'format': 'csv'},
},
'FEED_STORAGES': {'file': 'tests.test_feedexport.LogOnStoreFileStorage'},
'FEED_STORE_EMPTY': False
}
with LogCapture() as log:
yield self.exported_no_data(settings)
print(log)
self.assertEqual(str(log).count('Storage.store is called'), 3)
@defer.inlineCallbacks
def test_export_multiple_item_classes(self):
class MyItem2(scrapy.Item):
foo = scrapy.Field()
hello = scrapy.Field()
items = [
self.MyItem({'foo': 'bar1', 'egg': 'spam1'}),
MyItem2({'hello': 'world2', 'foo': 'bar2'}),
self.MyItem({'foo': 'bar3', 'egg': 'spam3', 'baz': 'quux3'}),
{'hello': 'world4', 'egg': 'spam4'},
]
# by default, Scrapy uses fields of the first Item for CSV and
# all fields for JSON Lines
header = self.MyItem.fields.keys()
rows_csv = [
{'egg': 'spam1', 'foo': 'bar1', 'baz': ''},
{'egg': '', 'foo': 'bar2', 'baz': ''},
{'egg': 'spam3', 'foo': 'bar3', 'baz': 'quux3'},
{'egg': 'spam4', 'foo': '', 'baz': ''},
]
rows_jl = [dict(row) for row in items]
yield self.assertExportedCsv(items, header, rows_csv, ordered=False)
yield self.assertExportedJsonLines(items, rows_jl)
# edge case: FEED_EXPORT_FIELDS==[] means the same as default None
settings = {'FEED_EXPORT_FIELDS': []}
yield self.assertExportedCsv(items, header, rows_csv, ordered=False)
yield self.assertExportedJsonLines(items, rows_jl, settings)
# it is possible to override fields using FEED_EXPORT_FIELDS
header = ["foo", "baz", "hello"]
settings = {'FEED_EXPORT_FIELDS': header}
rows = [
{'foo': 'bar1', 'baz': '', 'hello': ''},
{'foo': 'bar2', 'baz': '', 'hello': 'world2'},
{'foo': 'bar3', 'baz': 'quux3', 'hello': ''},
{'foo': '', 'baz': '', 'hello': 'world4'},
]
yield self.assertExported(items, header, rows,
settings=settings, ordered=True)
@defer.inlineCallbacks
def test_export_dicts(self):
# When dicts are used, only keys from the first row are used as
# a header for CSV, and all fields are used for JSON Lines.
items = [
{'foo': 'bar', 'egg': 'spam'},
{'foo': 'bar', 'egg': 'spam', 'baz': 'quux'},
]
rows_csv = [
{'egg': 'spam', 'foo': 'bar'},
{'egg': 'spam', 'foo': 'bar'}
]
rows_jl = items
yield self.assertExportedCsv(items, ['egg', 'foo'], rows_csv, ordered=False)
yield self.assertExportedJsonLines(items, rows_jl)
@defer.inlineCallbacks
def test_export_feed_export_fields(self):
# FEED_EXPORT_FIELDS option allows to order export fields
# and to select a subset of fields to export, both for Items and dicts.
for item_cls in [self.MyItem, dict]:
items = [
item_cls({'foo': 'bar1', 'egg': 'spam1'}),
item_cls({'foo': 'bar2', 'egg': 'spam2', 'baz': 'quux2'}),
]
# export all columns
settings = {'FEED_EXPORT_FIELDS': 'foo,baz,egg'}
rows = [
{'egg': 'spam1', 'foo': 'bar1', 'baz': ''},
{'egg': 'spam2', 'foo': 'bar2', 'baz': 'quux2'}
]
yield self.assertExported(items, ['foo', 'baz', 'egg'], rows,
settings=settings, ordered=True)
# export a subset of columns
settings = {'FEED_EXPORT_FIELDS': 'egg,baz'}
rows = [
{'egg': 'spam1', 'baz': ''},
{'egg': 'spam2', 'baz': 'quux2'}
]
yield self.assertExported(items, ['egg', 'baz'], rows,
settings=settings, ordered=True)
@defer.inlineCallbacks
def test_export_encoding(self):
|
@defer.inlineCallbacks
def test_export_multiple_configs(self):
items = [dict({'foo': u'FOO', 'bar': u'BAR'})]
formats = {
'json': '[\n{"bar": "BAR"}\n]'.encode('utf-8'),
'xml': (
'<?xml version="1.0" encoding="latin-1"?>\n'
'<items>\n <item>\n <foo>FOO</foo>\n </item>\n</items>'
).encode('latin-1'),
'csv': 'bar,foo\r\nBAR,FOO\r\n'.encode('utf-8'),
}
settings = {
'FEEDS': {
self._random_temp_filename(): {
'format': 'json',
'indent': 0,
'fields': ['bar'],
'encoding': 'utf-8',
},
self._random_temp_filename(): {
'format': 'xml',
'indent': 2,
'fields': ['foo'],
'encoding': 'latin-1',
},
self._random_temp_filename(): {
'format': 'csv',
'indent': None,
'fields': ['bar', 'foo'],
'encoding': 'utf-8',
},
},
}
data = yield self.exported_data(items, settings)
for fmt, expected in formats.items():
self.assertEqual(expected, data[fmt])
@defer.inlineCallbacks
def test_export_indentation(self):
items = [
{'foo': ['bar']},
{'key': 'value'},
]
test_cases = [
# JSON
{
'format': 'json',
'indent': None,
'expected': b'[{"foo": ["bar"]},{"key": "value"}]',
},
{
'format': 'json',
'indent': -1,
'expected': b"""[
{"foo": ["bar"]},
{"key": "value"}
]""",
},
{
'format': 'json',
'indent': 0,
'expected': b"""[
{"foo": ["bar"]},
{"key": "value"}
]""",
},
{
'format': 'json',
'indent': 2,
'expected': b"""[
{
"foo": [
"bar"
]
},
{
"key": "value"
}
]""",
},
{
'format': 'json',
'indent': 4,
'expected': b"""[
{
"foo": [
"bar"
]
},
{
"key": "value"
}
]""",
},
{
'format': 'json',
'indent': 5,
'expected': b"""[
{
"foo": [
"bar"
]
},
{
"key": "value"
}
]""",
},
# XML
{
'format': 'xml',
'indent': None,
'expected': b"""<?xml version="1.0" encoding="utf-8"?>
<items><item><foo><value>bar</value></foo></item><item><key>value</key></item></items>""",
},
{
'format': 'xml',
'indent': -1,
'expected': b"""<?xml version="1.0" encoding="utf-8"?>
<items>
<item><foo><value>bar</value></foo></item>
<item><key>value</key></item>
</items>""",
},
{
'format': 'xml',
'indent': 0,
'expected': b"""<?xml version="1.0" encoding="utf-8"?>
<items>
<item><foo><value>bar</value></foo></item>
<item><key>value</key></item>
</items>""",
},
{
'format': 'xml',
'indent': 2,
'expected': b"""<?xml version="1.0" encoding="utf-8"?>
<items>
<item>
<foo>
<value>bar</value>
</foo>
</item>
<item>
<key>value</key>
</item>
</items>""",
},
{
'format': 'xml',
'indent': 4,
'expected': b"""<?xml version="1.0" encoding="utf-8"?>
<items>
<item>
<foo>
<value>bar</value>
</foo>
</item>
<item>
<key>value</key>
</item>
</items>""",
},
{
'format': 'xml',
'indent': 5,
'expected': b"""<?xml version="1.0" encoding="utf-8"?>
<items>
<item>
<foo>
<value>bar</value>
</foo>
</item>
<item>
<key>value</key>
</item>
</items>""",
},
]
for row in test_cases:
settings = {
'FEEDS': {
self._random_temp_filename(): {
'format': row['format'],
'indent': row['indent'],
},
},
}
data = yield self.exported_data(items, settings)
self.assertEqual(row['expected'], data[row['format']])
@defer.inlineCallbacks
def test_init_exporters_storages_with_crawler(self):
settings = {
'FEED_EXPORTERS': {'csv': 'tests.test_feedexport.FromCrawlerCsvItemExporter'},
'FEED_STORAGES': {'file': 'tests.test_feedexport.FromCrawlerFileFeedStorage'},
'FEEDS': {
self._random_temp_filename(): {'format': 'csv'},
},
}
yield self.exported_data(items=[], settings=settings)
self.assertTrue(FromCrawlerCsvItemExporter.init_with_crawler)
self.assertTrue(FromCrawlerFileFeedStorage.init_with_crawler)
@defer.inlineCallbacks
def test_pathlib_uri(self):
feed_path = Path(self._random_temp_filename())
settings = {
'FEED_STORE_EMPTY': True,
'FEEDS': {
feed_path: {'format': 'csv'}
},
}
data = yield self.exported_no_data(settings)
self.assertEqual(data['csv'], b'')
|
items = [dict({'foo': u'Test\xd6'})]
formats = {
'json': '[{"foo": "Test\\u00d6"}]'.encode('utf-8'),
'jsonlines': '{"foo": "Test\\u00d6"}\n'.encode('utf-8'),
'xml': (
'<?xml version="1.0" encoding="utf-8"?>\n'
'<items><item><foo>Test\xd6</foo></item></items>'
).encode('utf-8'),
'csv': 'foo\r\nTest\xd6\r\n'.encode('utf-8'),
}
for fmt, expected in formats.items():
settings = {
'FEEDS': {
self._random_temp_filename(): {'format': fmt},
},
'FEED_EXPORT_INDENT': None,
}
data = yield self.exported_data(items, settings)
self.assertEqual(expected, data[fmt])
formats = {
'json': '[{"foo": "Test\xd6"}]'.encode('latin-1'),
'jsonlines': '{"foo": "Test\xd6"}\n'.encode('latin-1'),
'xml': (
'<?xml version="1.0" encoding="latin-1"?>\n'
'<items><item><foo>Test\xd6</foo></item></items>'
).encode('latin-1'),
'csv': 'foo\r\nTest\xd6\r\n'.encode('latin-1'),
}
for fmt, expected in formats.items():
settings = {
'FEEDS': {
self._random_temp_filename(): {'format': fmt},
},
'FEED_EXPORT_INDENT': None,
'FEED_EXPORT_ENCODING': 'latin-1',
}
data = yield self.exported_data(items, settings)
self.assertEqual(expected, data[fmt])
|
conf.rs
|
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
pub struct DbConf {
pub db_size: usize,
pub no_sync: bool,
pub allow_conf_resources_only: bool,
pub resource_defaults: ResourceDefaults,
pub resources: HashMap<String, ResourceConf>
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ResourceConf {
pub id_attr_name: Option<String>,
pub id_attr_type: Option<String>,
pub indices: Vec<IndexConf>
}
#[derive(Debug, Serialize, Deserialize)]
pub struct
|
{
pub id_attr_name: String,
pub id_attr_type: String
}
#[derive(Debug, Serialize, Deserialize)]
pub struct IndexConf {
pub attr_path: String,
pub unique: Option<bool>
}
|
ResourceDefaults
|
actions.py
|
import json
'''
crash course on APM & PX4 flight modes:
APM:
Stabilize
Alt Hold
Loiter
RTL (Return-to-Launch)
Auto
Additional flight modes:
Acro
AutoTune
Brake
Circle
|
Guided (and Guided_NoGPS)
Land
PosHold
Sport
Throw
Follow Me
Simple and Super Simple
Avoid_ADSB for ADS-B based avoidance of manned aircraft. Should not be set up as a pilot-selectable flight mode.
PX4:
MANUAL
Fixed wing aircraft/ rovers / boats:
MANUAL
STABILIZED
Multirotors:
ACRO
RATTITUDE
ANGLE
ASSISTED
ALTCTL
POSCTL
AUTO
AUTO_LOITER
AUTO_RTL
AUTO_MISSION
'''
class DummyPyAction(object):
def __init__(self, a):
self.a = a
def dummy(self, b, c):
print("DEBUG", self.a, b, c)
merged = int(self.a) * int(b) * int(c)
return json.dumps(merged)
|
Drift
|
7up-full.spec.ts
|
import {expect, use} from 'chai';
import {Contract, ethers, BigNumber} from 'ethers';
import {deployContract, MockProvider, solidity} from 'ethereum-waffle';
import SevenUp from '../build/SevenUpPool.json';
import ERC20 from '../build/ERC20Token.json';
import { BigNumber as BN } from 'bignumber.js'
use(solidity);
function convertBigNumber(bnAmount: BigNumber, divider: number) {
return new BN(bnAmount.toString()).dividedBy(new BN(divider)).toFixed();
}
// describe('7up-full', () => {
// let provider = new MockProvider();
// const [walletMe, walletOther, walletPool, newGovernor, walletTeam, walletInit] = provider.getWallets();
// let tokenUSDT : Contract;
// let tokenFIL : Contract;
// let sevenContract : Contract;
// let masterChef : Contract;
// let tx: any;
// let receipt: any;
// async function getBlockNumber() {
// const blockNumber = await provider.getBlockNumber()
// console.log("Current block number: " + blockNumber);
// return blockNumber;
// }
// before(async () => {
// sevenContract = await deployContract(walletMe, SevenUp);
// tokenUSDT = await deployContract(walletOther, ERC20, ['USDT', 'USDT', 18, ethers.utils.parseEther('1000000')]);
// tokenFIL = await deployContract(walletMe, ERC20, ['File Coin', 'FIL', 18, ethers.utils.parseEther('1000000')]);
// await sevenContract.connect(walletMe).init(tokenFIL.address, tokenUSDT.address);
//         await sevenContract.connect(walletMe).updatePledgeRate(5000);  // 50% pledge rate
// await sevenContract.connect(walletMe).updatePledgePrice(200); // 0.02 FIL = 1 USDT
// await sevenContract.connect(walletMe).updateLiquidationRate(9000); // 90% liquidation rate
// console.log('walletMe = ', walletMe.address);
// console.log('walletOther = ', walletOther.address);
// console.log('7up address = ', sevenContract.address);
// console.log('USDT address = ', tokenUSDT.address);
// console.log('FIL address = ', tokenFIL.address);
// await tokenFIL.connect(walletMe).approve(sevenContract.address, ethers.utils.parseEther('1000000'));
// await tokenFIL.connect(walletOther).approve(sevenContract.address, ethers.utils.parseEther('1000000'));
// await tokenUSDT.connect(walletOther).approve(sevenContract.address, ethers.utils.parseEther('1000000'));
// await tokenFIL.connect(walletMe).transfer(walletOther.address, ethers.utils.parseEther('100000'));
// });
// async function sevenInfo() {
// let result = {
// interestPerSupply: await sevenContract.interestPerSupply(),
// liquidationPerSupply: await sevenContract.liquidationPerSupply(),
// interestPerBorrow : await sevenContract.interestPerBorrow(),
// totalLiquidation: await sevenContract.totalLiquidation(),
// totalLiquidationSupplyAmount: await sevenContract.totalLiquidationSupplyAmount(),
// totalBorrow: await sevenContract.totalBorrow(),
// totalPledge: await sevenContract.totalPledge(),
// remainSupply: await sevenContract.remainSupply(),
// pledgeRate: await sevenContract.pledgeRate(),
// pledgePrice: await sevenContract.pledgePrice(),
// liquidationRate: await sevenContract.liquidationRate(),
// baseInterests: await sevenContract.baseInterests(),
// marketFrenzy: await sevenContract.marketFrenzy(),
// lastInterestUpdate: await sevenContract.lastInterestUpdate()
// };
// console.log('===sevenInfo begin===');
// for (let k in result) {
// console.log(k+':', convertBigNumber(result[k], 1))
// }
// console.log('===sevenInfo end===')
// return result;
// };
// async function SupplyStruct(user:any) {
// let result = await sevenContract.supplys(user);
// console.log('===SupplyStruct begin===');
// for (let k in result) {
// console.log(k+':', convertBigNumber(result[k], 1))
// }
// console.log('===SupplyStruct end===');
// return result;
// };
// async function BorrowStruct(user:any) {
// let result = await sevenContract.borrows(user);
// console.log('===BorrowStruct begin===');
// for (let k in result) {
// console.log(k+':', convertBigNumber(result[k], 1))
// }
// console.log('===BorrowStruct end===');
// return result;
// };
// it('simple deposit & withdraw', async() => {
// await sevenContract.connect(walletMe).deposit(ethers.utils.parseEther('1000'), walletMe.address);
// console.log(convertBigNumber((await sevenContract.supplys(walletMe.address)).amountSupply, 1));
// // expect(convertBigNumber((await sevenContract.supplys(walletMe.address)).amountSupply, 1)).to.equals('1000');
// // expect(convertBigNumber(await sevenContract.remainSupply(), 1)).to.equals('1000');
// await sevenContract.connect(walletMe).withdraw(ethers.utils.parseEther('500'), walletMe.address);
|
// // expect(convertBigNumber(await sevenContract.remainSupply(), 1)).to.equals('500');
// await sevenContract.connect(walletMe).withdraw(ethers.utils.parseEther('500'), walletMe.address);
// // expect(convertBigNumber(await tokenFIL.balanceOf(walletMe.address), 1)).to.equals('900000');
// // expect(convertBigNumber((await sevenContract.supplys(walletMe.address)).amountSupply, 1)).to.equals('0');
// // expect(convertBigNumber(await sevenContract.remainSupply(), 1)).to.equals('0');
// });
// it('deposit(1000) -> borrow(100) -> repay(100) -> withdraw(1000)', async() => {
// await sevenContract.connect(walletMe).deposit(ethers.utils.parseEther('1000'), walletMe.address);
// console.log('after deposit: ',
// convertBigNumber(await tokenFIL.balanceOf(sevenContract.address), 1),
// convertBigNumber(await tokenUSDT.balanceOf(sevenContract.address), 1));
// let maxBorrow = await sevenContract.getMaximumBorrowAmount(ethers.utils.parseEther('10000'));
// console.log('maxBorrow:', convertBigNumber(maxBorrow, 1));
// await sevenContract.connect(walletOther).borrow(ethers.utils.parseEther('10000'), maxBorrow, walletOther.address);
// console.log('after borrow: ',
// convertBigNumber(await tokenUSDT.balanceOf(walletOther.address), 1),
// convertBigNumber(await tokenFIL.balanceOf(walletOther.address), 1),
// convertBigNumber(await tokenFIL.balanceOf(sevenContract.address), 1),
// convertBigNumber(await tokenUSDT.balanceOf(sevenContract.address), 1));
// await sevenInfo();
// console.log('getInterests:', convertBigNumber(await sevenContract.getInterests(),1));
// let tx = await sevenContract.connect(walletOther).repay(ethers.utils.parseEther('10000'), walletOther.address);
// let receipt = await tx.wait()
// console.log('repay gas:', receipt.gasUsed.toString())
// // console.log('events:', receipt.events)
// // console.log(receipt.events[2].event, 'args:', receipt.events[2].args)
// console.log('_supplyAmount:', convertBigNumber(receipt.events[2].args._supplyAmount, 1))
// console.log('_collateralAmount:', convertBigNumber(receipt.events[2].args._collateralAmount, 1))
// console.log('_interestAmount:', convertBigNumber(receipt.events[2].args._interestAmount, 1))
// console.log('after repay: ',
// convertBigNumber(await tokenFIL.balanceOf(sevenContract.address), 1),
// convertBigNumber(await tokenUSDT.balanceOf(sevenContract.address), 1));
// await sevenInfo();
// await SupplyStruct(walletMe.address);
// console.log('withdraw:', convertBigNumber(ethers.utils.parseEther('1000'),1));
// await sevenContract.connect(walletMe).withdraw(ethers.utils.parseEther('1000'), walletMe.address);
// console.log('after withdraw: ',
// convertBigNumber(await tokenFIL.balanceOf(sevenContract.address), 1),
// convertBigNumber(await tokenUSDT.balanceOf(sevenContract.address), 1));
// });
// it('deposit(1000) -> borrow(100) -> liquidation(100) -> withdraw(1000)', async() => {
// await sevenContract.connect(walletMe).deposit(ethers.utils.parseEther('1000'), walletMe.address);
// console.log('after deposit: ',
// convertBigNumber(await tokenFIL.balanceOf(sevenContract.address), 1),
// convertBigNumber(await tokenUSDT.balanceOf(sevenContract.address), 1));
// let maxBorrow = await sevenContract.getMaximumBorrowAmount(ethers.utils.parseEther('10000'));
// await sevenContract.connect(walletOther).borrow(ethers.utils.parseEther('10000'), maxBorrow, walletOther.address);
// console.log('after borrow: ',
// convertBigNumber(await tokenFIL.balanceOf(sevenContract.address), 1),
// convertBigNumber(await tokenUSDT.balanceOf(sevenContract.address), 1));
// await sevenContract.connect(walletMe).updatePledgePrice(100); // 0.01 FIL = 1 USDT
// await sevenContract.connect(walletMe).liquidation(walletOther.address, walletMe.address);
// console.log('after liquidation: ',
// convertBigNumber(await tokenFIL.balanceOf(sevenContract.address), 1),
// convertBigNumber(await tokenUSDT.balanceOf(sevenContract.address), 1));
// await sevenInfo();
// // console.log('getInterests:', convertBigNumber(await sevenContract.getInterests(),1));
// await SupplyStruct(walletMe.address);
// // console.log('withdraw:', convertBigNumber(ethers.utils.parseEther('1000'),1));
// await sevenContract.connect(walletMe).withdraw(ethers.utils.parseEther('1000'), walletMe.address);
// console.log('after withdraw: ',
// convertBigNumber(await tokenFIL.balanceOf(sevenContract.address), 1),
// convertBigNumber(await tokenUSDT.balanceOf(sevenContract.address), 1));
// await sevenInfo();
// });
// });
|
// // expect(convertBigNumber(await tokenFIL.balanceOf(walletMe.address), 1)).to.equals('899500');
// // expect(convertBigNumber((await sevenContract.supplys(walletMe.address)).amountSupply, 1)).to.equals('500');
|
demo.min.js
|
!function(){"use strict";var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{};var t,r=(function(e,t){function r(e,t){for(var r=0;r<e.length;++r)if(e[r]===t)return r;return-1}function n(e,t){var n=[],i=[];return null==t&&(t=function(e,t){return n[0]===t?"[Circular ~]":"[Circular ~."+i.slice(0,r(n,t)).join(".")+"]"}),function(o,a){if(n.length>0){var s=r(n,this);~s?n.splice(s+1):n.push(this),~s?i.splice(s,1/0,o):i.push(o),~r(n,a)&&(a=t.call(this,o,a))}else n.push(a);return null==e?a instanceof Error?function(e){var t={stack:e.stack,message:e.message,name:e.name};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&(t[r]=e[r]);return t}(a):a:e.call(this,o,a)}}(e.exports=function(e,t,r,i){return JSON.stringify(e,n(t,i),r)}).getSerialize=n}(t={exports:{}},t.exports),t.exports),n=r.getSerialize,i=Object.freeze({default:r,__moduleExports:r,getSerialize:n}),o=i&&r||i,a="undefined"!=typeof window?window:void 0!==e?e:"undefined"!=typeof self?self:{};function s(e){return void 0===e}function c(e){return"[object Object]"===Object.prototype.toString.call(e)}function l(e){return"[object String]"===Object.prototype.toString.call(e)}function u(e){return"[object Array]"===Object.prototype.toString.call(e)}function p(){try{return new ErrorEvent(""),!0}catch(e){return!1}}function h(){if(!("fetch"in a))return!1;try{return new Headers,new Request(""),new Response,!0}catch(e){return!1}}function f(e,t){var r,n;if(s(e.length))for(r in e)g(e,r)&&t.call(null,r,e[r]);else if(n=e.length)for(r=0;r<n;r++)t.call(null,r,e[r])}function d(e,t){if("number"!=typeof t)throw new Error("2nd argument to `truncate` function should be a number");return"string"!=typeof e||0===t?e:e.length<=t?e:e.substr(0,t)+"…"}function g(e,t){return Object.prototype.hasOwnProperty.call(e,t)}function _(e){for(var t,r=[],n=0,i=e.length;n<i;n++)l(t=e[n])?r.push(t.replace(/([.*+?^=!:${}()|\[\]\/\\])/g,"\\$1")):t&&t.source&&r.push(t.source);return new RegExp(r.join("|"),"i")}function v(e){var t,r,n,i,o,a=[];if(!e||!e.tagName)return"";if(a.push(e.tagName.toLowerCase()),e.id&&a.push("#"+e.id),(t=e.className)&&l(t))for(r=t.split(/\s+/),o=0;o<r.length;o++)a.push("."+r[o]);var s=["type","name","title","alt"];for(o=0;o<s.length;o++)n=s[o],(i=e.getAttribute(n))&&a.push("["+n+'="'+i+'"]');return a.join("")}function m(e,t){return!!(!!e^!!t)}function b(e,t){if(m(e,t))return!1;var r,n,i=e.frames,o=t.frames;if(i.length!==o.length)return!1;for(var a=0;a<i.length;a++)if(r=i[a],n=o[a],r.filename!==n.filename||r.lineno!==n.lineno||r.colno!==n.colno||r.function!==n.function)return!1;return!0}var y=3,E=51200,w=40;function x(e){return function(e){return~-encodeURI(e).split(/%..|./).length}(JSON.stringify(e))}function k(e){if("string"==typeof e){return d(e,40)}if("number"==typeof e||"boolean"==typeof e||void 0===e)return e;var t=Object.prototype.toString.call(e);return"[object Object]"===t?"[Object]":"[object Array]"===t?"[Array]":"[object Function]"===t?e.name?"[Function: "+e.name+"]":"[Function]":e}var S={isObject:function(e){return"object"==typeof e&&null!==e},isError:function(e){switch({}.toString.call(e)){case"[object Error]":case"[object Exception]":case"[object DOMException]":return!0;default:return e instanceof Error}},isErrorEvent:function(e){return p()&&"[object ErrorEvent]"==={}.toString.call(e)},isUndefined:s,isFunction:function(e){return"function"==typeof e},isPlainObject:c,isString:l,isArray:u,isEmptyObject:function(e){if(!c(e))return!1;for(var t in 
e)if(e.hasOwnProperty(t))return!1;return!0},supportsErrorEvent:p,supportsFetch:h,supportsReferrerPolicy:function(){if(!h())return!1;try{return new Request("pickleRick",{referrerPolicy:"origin"}),!0}catch(e){return!1}},supportsPromiseRejectionEvent:function(){return"function"==typeof PromiseRejectionEvent},wrappedCallback:function(e){return function(t,r){var n=e(t)||t;return r&&r(n)||n}},each:f,objectMerge:function(e,t){return t?(f(t,function(t,r){e[t]=r}),e):e},truncate:d,objectFrozen:function(e){return!!Object.isFrozen&&Object.isFrozen(e)},hasKey:g,joinRegExp:_,urlencode:function(e){var t=[];return f(e,function(e,r){t.push(encodeURIComponent(e)+"="+encodeURIComponent(r))}),t.join("&")},uuid4:function(){var e=a.crypto||a.msCrypto;if(!s(e)&&e.getRandomValues){var t=new Uint16Array(8);e.getRandomValues(t),t[3]=4095&t[3]|16384,t[4]=16383&t[4]|32768;var r=function(e){for(var t=e.toString(16);t.length<4;)t="0"+t;return t};return r(t[0])+r(t[1])+r(t[2])+r(t[3])+r(t[4])+r(t[5])+r(t[6])+r(t[7])}return"xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx".replace(/[xy]/g,function(e){var t=16*Math.random()|0;return("x"===e?t:3&t|8).toString(16)})},htmlTreeAsString:function(e){for(var t,r=[],n=0,i=0,o=" > ".length;e&&n++<5&&!("html"===(t=v(e))||n>1&&i+r.length*o+t.length>=80);)r.push(t),i+=t.length,e=e.parentNode;return r.reverse().join(" > ")},htmlElementAsString:v,isSameException:function(e,t){return!m(e,t)&&(e=e.values[0],t=t.values[0],e.type===t.type&&e.value===t.value&&(r=e.stacktrace,n=t.stacktrace,(!s(r)||!s(n))&&b(e.stacktrace,t.stacktrace)));var r,n},isSameStacktrace:b,parseUrl:function(e){if("string"!=typeof e)return{};var t=e.match(/^(([^:\/?#]+):)?(\/\/([^\/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?$/),r=t[6]||"",n=t[8]||"";return{protocol:t[2],host:t[4],path:t[5],relative:t[5]+r+n}},fill:function(e,t,r,n){if(null!=e){var i=e[t];e[t]=r(i),e[t].__raven__=!0,e[t].__orig__=i,n&&n.push([e,t,i])}},safeJoin:function(e,t){if(!u(e))return"";for(var r=[],n=0;n<e.length;n++)try{r.push(String(e[n]))}catch(e){r.push("[value cannot be serialized]")}return r.join(t)},serializeException:function e(t,r,n){if(!c(t))return t;n="number"!=typeof(r="number"!=typeof r?y:r)?E:n;var i=function e(t,r){return 0===r?k(t):c(t)?Object.keys(t).reduce(function(n,i){return n[i]=e(t[i],r-1),n},{}):Array.isArray(t)?t.map(function(t){return e(t,r-1)}):k(t)}(t,r);return x(o(i))>n?e(t,r-1):i},serializeKeysForMessage:function(e,t){if("number"==typeof e||"string"==typeof e)return e.toString();if(!Array.isArray(e))return"";if(0===(e=e.filter(function(e){return"string"==typeof e})).length)return"[object has no keys]";if(t="number"!=typeof t?w:t,e[0].length>=t)return e[0];for(var r=e.length;r>0;r--){var n=e.slice(0,r).join(", ");if(!(n.length>t))return r===e.length?n:n+"…"}return""},sanitize:function(e,t){if(!u(t)||u(t)&&0===t.length)return e;var r,n=_(t),i="********";try{r=JSON.parse(o(e))}catch(t){return e}return function e(t){return u(t)?t.map(function(t){return e(t)}):c(t)?Object.keys(t).reduce(function(r,o){return 
n.test(o)?r[o]=i:r[o]=e(t[o]),r},{}):t}(r)}},O=S.isObject,j=S.isError,C=S.isErrorEvent,R=S.isUndefined,T=S.isFunction,F=S.isPlainObject,A=S.isString,B=S.isArray,P=S.isEmptyObject,D=S.supportsErrorEvent,M=S.supportsFetch,U=S.supportsReferrerPolicy,H=S.supportsPromiseRejectionEvent,L=S.wrappedCallback,I=S.each,N=S.objectMerge,z=S.truncate,q=S.objectFrozen,K=S.hasKey,W=S.joinRegExp,V=S.urlencode,J=S.uuid4,$=S.htmlTreeAsString,X=S.htmlElementAsString,G=S.isSameException,Y=S.isSameStacktrace,Z=S.parseUrl,Q=S.fill,ee=S.safeJoin,te=S.serializeException,re=S.serializeKeysForMessage,ne=S.sanitize,ie=Object.freeze({default:S,__moduleExports:S,isObject:O,isError:j,isErrorEvent:C,isUndefined:R,isFunction:T,isPlainObject:F,isString:A,isArray:B,isEmptyObject:P,supportsErrorEvent:D,supportsFetch:M,supportsReferrerPolicy:U,supportsPromiseRejectionEvent:H,wrappedCallback:L,each:I,objectMerge:N,truncate:z,objectFrozen:q,hasKey:K,joinRegExp:W,urlencode:V,uuid4:J,htmlTreeAsString:$,htmlElementAsString:X,isSameException:G,isSameStacktrace:Y,parseUrl:Z,fill:Q,safeJoin:ee,serializeException:te,serializeKeysForMessage:re,sanitize:ne}),oe=ie&&S||ie,ae={collectWindowErrors:!0,debug:!1},se="undefined"!=typeof window?window:void 0!==e?e:"undefined"!=typeof self?self:{},ce=[].slice,le="?",ue=/^(?:[Uu]ncaught (?:exception: )?)?(?:((?:Eval|Internal|Range|Reference|Syntax|Type|URI|)Error): )?(.*)$/;function pe(){return"undefined"==typeof document||null==document.location?"":document.location.href}ae.report=function(){var e,t,r=[],n=null,i=null,o=null;function a(e,t){var n=null;if(!t||ae.collectWindowErrors){for(var i in r)if(r.hasOwnProperty(i))try{r[i].apply(null,[e].concat(ce.call(arguments,2)))}catch(e){n=e}if(n)throw n}}function s(t,r,n,i,s){var l=oe.isErrorEvent(s)?s.error:s,u=oe.isErrorEvent(t)?t.message:t;if(o)ae.computeStackTrace.augmentStackTraceWithInitialElement(o,r,n,u),c();else if(l&&oe.isError(l))a(ae.computeStackTrace(l),!0);else{var p,h={url:r,line:n,column:i},f=void 0;if("[object String]"==={}.toString.call(u))(p=u.match(ue))&&(f=p[1],u=p[2]);h.func=le,a({name:f,message:u,url:pe(),stack:[h]},!0)}return!!e&&e.apply(this,arguments)}function c(){
|
ar e=o,t=n;n=null,o=null,i=null,a.apply(null,[e,!1].concat(t))}function l(e,t){var r=ce.call(arguments,1);if(o){if(i===e)return;c()}var a=ae.computeStackTrace(e);if(o=a,i=e,n=r,setTimeout(function(){i===e&&c()},a.incomplete?2e3:0),!1!==t)throw e}return l.subscribe=function(n){t||(e=se.onerror,se.onerror=s,t=!0),r.push(n)},l.unsubscribe=function(e){for(var t=r.length-1;t>=0;--t)r[t]===e&&r.splice(t,1)},l.uninstall=function(){t&&(se.onerror=e,t=!1,e=void 0),r=[]},l}(),ae.computeStackTrace=function(){function e(e){if(void 0!==e.stack&&e.stack){for(var t,r,n,i=/^\s*at (?:(.*?) ?\()?((?:file|https?|blob|chrome-extension|native|eval|webpack|<anonymous>|[a-z]:|\/).*?)(?::(\d+))?(?::(\d+))?\)?\s*$/i,o=/^\s*at (?:((?:\[object object\])?.+) )?\(?((?:file|ms-appx(?:-web)|https?|webpack|blob):.*?):(\d+)(?::(\d+))?\)?\s*$/i,a=/^\s*(.*?)(?:\((.*?)\))?(?:^|@)((?:file|https?|blob|chrome|webpack|resource|moz-extension).*?:\/.*?|\[native code\]|[^@]*bundle)(?::(\d+))?(?::(\d+))?\s*$/i,s=/(\S+) line (\d+)(?: > eval line \d+)* > eval/i,c=/\((\S*)(?::(\d+))(?::(\d+))\)/,l=e.stack.split("\n"),u=[],p=(/^(.*) is undefined$/.exec(e.message),0),h=l.length;p<h;++p){if(r=i.exec(l[p])){var f=r[2]&&0===r[2].indexOf("native");r[2]&&0===r[2].indexOf("eval")&&(t=c.exec(r[2]))&&(r[2]=t[1],r[3]=t[2],r[4]=t[3]),n={url:f?null:r[2],func:r[1]||le,args:f?[r[2]]:[],line:r[3]?+r[3]:null,column:r[4]?+r[4]:null}}else if(r=o.exec(l[p]))n={url:r[2],func:r[1]||le,args:[],line:+r[3],column:r[4]?+r[4]:null};else{if(!(r=a.exec(l[p])))continue;r[3]&&r[3].indexOf(" > eval")>-1&&(t=s.exec(r[3]))?(r[3]=t[1],r[4]=t[2],r[5]=null):0!==p||r[5]||void 0===e.columnNumber||(u[0].column=e.columnNumber+1),n={url:r[3],func:r[1]||le,args:r[2]?r[2].split(","):[],line:r[4]?+r[4]:null,column:r[5]?+r[5]:null}}!n.func&&n.line&&(n.func=le),u.push(n)}return u.length?{name:e.name,message:e.message,url:pe(),stack:u}:null}}function t(e,t,r,n){var i={url:t,line:r};if(i.url&&i.line){if(e.incomplete=!1,i.func||(i.func=le),e.stack.length>0&&e.stack[0].url===i.url){if(e.stack[0].line===i.line)return!1;if(!e.stack[0].line&&e.stack[0].func===i.func)return e.stack[0].line=i.line,!1}return e.stack.unshift(i),e.partial=!0,!0}return e.incomplete=!0,!1}function r(e,i){for(var o,a,s=/function\s+([_$a-zA-Z\xA0-\uFFFF][_$a-zA-Z0-9\xA0-\uFFFF]*)?\s*\(/i,c=[],l={},u=!1,p=r.caller;p&&!u;p=p.caller)if(p!==n&&p!==ae.report){if(a={url:null,func:le,line:null,column:null},p.name?a.func=p.name:(o=s.exec(p.toString()))&&(a.func=o[1]),void 0===a.func)try{a.func=o.input.substring(0,o.input.indexOf("{"))}catch(e){}l[""+p]?u=!0:l[""+p]=!0,c.push(a)}i&&c.splice(0,i);var h={name:e.name,message:e.message,url:pe(),stack:c};return t(h,e.sourceURL||e.fileName,e.line||e.lineNumber,e.message||e.description),h}function n(t,n){var i=null;n=null==n?0:+n;try{if(i=e(t))return i}catch(e){if(ae.debug)throw e}try{if(i=r(t,n+1))return i}catch(e){if(ae.debug)throw e}return{name:t.name,message:t.message,url:pe()}}return n.augmentStackTraceWithInitialElement=t,n.computeStackTraceFromStackProp=e,n}();var he=ae,fe=Object.freeze({default:he,__moduleExports:he});function de(e,t){var r=(65535&e)+(65535&t);return(e>>16)+(t>>16)+(r>>16)<<16|65535&r}function ge(e,t,r,n,i,o){return de((a=de(de(t,e),de(n,o)))<<(s=i)|a>>>32-s,r);var a,s}function _e(e,t,r,n,i,o,a){return ge(t&r|~t&n,e,t,i,o,a)}function ve(e,t,r,n,i,o,a){return ge(t&n|r&~n,e,t,i,o,a)}function me(e,t,r,n,i,o,a){return ge(t^r^n,e,t,i,o,a)}function be(e,t,r,n,i,o,a){return ge(r^(t|~n),e,t,i,o,a)}function ye(e,t){var 
r,n,i,o,a;e[t>>5]|=128<<t%32,e[14+(t+64>>>9<<4)]=t;var s=1732584193,c=-271733879,l=-1732584194,u=271733878;for(r=0;r<e.length;r+=16)n=s,i=c,o=l,a=u,c=be(c=be(c=be(c=be(c=me(c=me(c=me(c=me(c=ve(c=ve(c=ve(c=ve(c=_e(c=_e(c=_e(c=_e(c,l=_e(l,u=_e(u,s=_e(s,c,l,u,e[r],7,-680876936),c,l,e[r+1],12,-389564586),s,c,e[r+2],17,606105819),u,s,e[r+3],22,-1044525330),l=_e(l,u=_e(u,s=_e(s,c,l,u,e[r+4],7,-176418897),c,l,e[r+5],12,1200080426),s,c,e[r+6],17,-1473231341),u,s,e[r+7],22,-45705983),l=_e(l,u=_e(u,s=_e(s,c,l,u,e[r+8],7,1770035416),c,l,e[r+9],12,-1958414417),s,c,e[r+10],17,-42063),u,s,e[r+11],22,-1990404162),l=_e(l,u=_e(u,s=_e(s,c,l,u,e[r+12],7,1804603682),c,l,e[r+13],12,-40341101),s,c,e[r+14],17,-1502002290),u,s,e[r+15],22,1236535329),l=ve(l,u=ve(u,s=ve(s,c,l,u,e[r+1],5,-165796510),c,l,e[r+6],9,-1069501632),s,c,e[r+11],14,643717713),u,s,e[r],20,-373897302),l=ve(l,u=ve(u,s=ve(s,c,l,u,e[r+5],5,-701558691),c,l,e[r+10],9,38016083),s,c,e[r+15],14,-660478335),u,s,e[r+4],20,-405537848),l=ve(l,u=ve(u,s=ve(s,c,l,u,e[r+9],5,568446438),c,l,e[r+14],9,-1019803690),s,c,e[r+3],14,-187363961),u,s,e[r+8],20,1163531501),l=ve(l,u=ve(u,s=ve(s,c,l,u,e[r+13],5,-1444681467),c,l,e[r+2],9,-51403784),s,c,e[r+7],14,1735328473),u,s,e[r+12],20,-1926607734),l=me(l,u=me(u,s=me(s,c,l,u,e[r+5],4,-378558),c,l,e[r+8],11,-2022574463),s,c,e[r+11],16,1839030562),u,s,e[r+14],23,-35309556),l=me(l,u=me(u,s=me(s,c,l,u,e[r+1],4,-1530992060),c,l,e[r+4],11,1272893353),s,c,e[r+7],16,-155497632),u,s,e[r+10],23,-1094730640),l=me(l,u=me(u,s=me(s,c,l,u,e[r+13],4,681279174),c,l,e[r],11,-358537222),s,c,e[r+3],16,-722521979),u,s,e[r+6],23,76029189),l=me(l,u=me(u,s=me(s,c,l,u,e[r+9],4,-640364487),c,l,e[r+12],11,-421815835),s,c,e[r+15],16,530742520),u,s,e[r+2],23,-995338651),l=be(l,u=be(u,s=be(s,c,l,u,e[r],6,-198630844),c,l,e[r+7],10,1126891415),s,c,e[r+14],15,-1416354905),u,s,e[r+5],21,-57434055),l=be(l,u=be(u,s=be(s,c,l,u,e[r+12],6,1700485571),c,l,e[r+3],10,-1894986606),s,c,e[r+10],15,-1051523),u,s,e[r+1],21,-2054922799),l=be(l,u=be(u,s=be(s,c,l,u,e[r+8],6,1873313359),c,l,e[r+15],10,-30611744),s,c,e[r+6],15,-1560198380),u,s,e[r+13],21,1309151649),l=be(l,u=be(u,s=be(s,c,l,u,e[r+4],6,-145523070),c,l,e[r+11],10,-1120210379),s,c,e[r+2],15,718787259),u,s,e[r+9],21,-343485551),s=de(s,n),c=de(c,i),l=de(l,o),u=de(u,a);return[s,c,l,u]}function Ee(e){var t,r="",n=32*e.length;for(t=0;t<n;t+=8)r+=String.fromCharCode(e[t>>5]>>>t%32&255);return r}function we(e){var t,r=[];for(r[(e.length>>2)-1]=void 0,t=0;t<r.length;t+=1)r[t]=0;var n=8*e.length;for(t=0;t<n;t+=8)r[t>>5]|=(255&e.charCodeAt(t/8))<<t%32;return r}function xe(e){var t,r,n="";for(r=0;r<e.length;r+=1)t=e.charCodeAt(r),n+="0123456789abcdef".charAt(t>>>4&15)+"0123456789abcdef".charAt(15&t);return n}function ke(e){return unescape(encodeURIComponent(e))}function Se(e){return function(e){return Ee(ye(we(e),8*e.length))}(ke(e))}function Oe(e,t){return function(e,t){var r,n,i=we(e),o=[],a=[];for(o[15]=a[15]=void 0,i.length>16&&(i=ye(i,8*e.length)),r=0;r<16;r+=1)o[r]=909522486^i[r],a[r]=1549556828^i[r];return n=ye(o.concat(we(t)),512+8*t.length),Ee(ye(a.concat(n),640))}(ke(e),ke(t))}var je=function(e,t,r){return t?r?Oe(t,e):xe(Oe(t,e)):r?Se(e):xe(Se(e))},Ce=Object.freeze({default:je,__moduleExports:je});function Re(e){this.name="RavenConfigError",this.message=e}Re.prototype=new Error,Re.prototype.constructor=Re;var Te=Re,Fe=Object.freeze({default:Te,__moduleExports:Te}),Ae={wrapMethod:function(e,t,r){var n=e[t],i=e;if(t in e){var o="warn"===t?"warning":t;e[t]=function(){var 
e=[].slice.call(arguments),a=oe.safeJoin(e," "),s={level:o,logger:"console",extra:{arguments:e}};"assert"===t?!1===e[0]&&(a="Assertion failed: "+(oe.safeJoin(e.slice(1)," ")||"console.assert"),s.extra.arguments=e.slice(1),r&&r(a,s)):r&&r(a,s),n&&Function.prototype.apply.call(n,i,e)}}}},Be=Ae.wrapMethod,Pe=Object.freeze({default:Ae,__moduleExports:Ae,wrapMethod:Be}),De=fe&&he||fe,Me=Ce&&je||Ce,Ue=Fe&&Te||Fe,He=Pe&&Ae||Pe,Le=oe.isError,Ie=oe.isObject,Ne=oe.isPlainObject,ze=oe.isErrorEvent,qe=oe.isUndefined,Ke=oe.isFunction,We=oe.isString,Ve=oe.isArray,Je=oe.isEmptyObject,$e=oe.each,Xe=oe.objectMerge,Ge=oe.truncate,Ye=oe.objectFrozen,Ze=oe.hasKey,Qe=oe.joinRegExp,et=oe.urlencode,tt=oe.uuid4,rt=oe.htmlTreeAsString,nt=oe.isSameException,it=oe.isSameStacktrace,ot=oe.parseUrl,at=oe.fill,st=oe.supportsFetch,ct=oe.supportsReferrerPolicy,lt=oe.serializeKeysForMessage,ut=oe.serializeException,pt=oe.sanitize,ht=He.wrapMethod,ft="source protocol user pass host port path".split(" "),dt=/^(?:(\w+):)?\/\/(?:(\w+)(:\w+)?@)?([\w\.-]+)(?::(\d+))?(\/.*)/;function gt(){return+new Date}var _t="undefined"!=typeof window?window:void 0!==e?e:"undefined"!=typeof self?self:{},vt=_t.document,mt=_t.navigator;function bt(e,t){return Ke(t)?function(r){return t(r,e)}:t}function yt(){for(var e in this._hasJSON=!("object"!=typeof JSON||!JSON.stringify),this._hasDocument=!qe(vt),this._hasNavigator=!qe(mt),this._lastCapturedException=null,this._lastData=null,this._lastEventId=null,this._globalServer=null,this._globalKey=null,this._globalProject=null,this._globalContext={},this._globalOptions={release:_t.SENTRY_RELEASE&&_t.SENTRY_RELEASE.id,logger:"javascript",ignoreErrors:[],ignoreUrls:[],whitelistUrls:[],includePaths:[],headers:null,collectWindowErrors:!0,captureUnhandledRejections:!0,maxMessageLength:0,maxUrlLength:250,stackTraceLimit:50,autoBreadcrumbs:!0,instrument:!0,sampleRate:1,sanitizeKeys:[]},this._fetchDefaults={method:"POST",keepalive:!0,referrerPolicy:ct()?"origin":""},this._ignoreOnError=0,this._isRavenInstalled=!1,this._originalErrorStackTraceLimit=Error.stackTraceLimit,this._originalConsole=_t.console||{},this._originalConsoleMethods={},this._plugins=[],this._startTime=gt(),this._wrappedBuiltIns=[],this._breadcrumbs=[],this._lastCapturedEvent=null,this._keypressTimeout,this._location=_t.location,this._lastHref=this._location&&this._location.href,this._resetBackoff(),this._originalConsole)this._originalConsoleMethods[e]=this._originalConsole[e]}yt.prototype={VERSION:"3.24.2",debug:!1,TraceKit:De,config:function(e,t){var r=this;if(r._globalServer)return this._logDebug("error","Error: Raven has already been configured"),r;if(!e)return r;var n=r._globalOptions;t&&$e(t,function(e,t){"tags"===e||"extra"===e||"user"===e?r._globalContext[e]=t:n[e]=t}),r.setDSN(e),n.ignoreErrors.push(/^Script error\.?$/),n.ignoreErrors.push(/^Javascript error: Script error\.? 
on line 0$/),n.ignoreErrors=Qe(n.ignoreErrors),n.ignoreUrls=!!n.ignoreUrls.length&&Qe(n.ignoreUrls),n.whitelistUrls=!!n.whitelistUrls.length&&Qe(n.whitelistUrls),n.includePaths=Qe(n.includePaths),n.maxBreadcrumbs=Math.max(0,Math.min(n.maxBreadcrumbs||100,100));var i={xhr:!0,console:!0,dom:!0,location:!0,sentry:!0},o=n.autoBreadcrumbs;"[object Object]"==={}.toString.call(o)?o=Xe(i,o):!1!==o&&(o=i),n.autoBreadcrumbs=o;var a={tryCatch:!0},s=n.instrument;return"[object Object]"==={}.toString.call(s)?s=Xe(a,s):!1!==s&&(s=a),n.instrument=s,De.collectWindowErrors=!!n.collectWindowErrors,r},install:function(){var e=this;return e.isSetup()&&!e._isRavenInstalled&&(De.report.subscribe(function(){e._handleOnErrorStackInfo.apply(e,arguments)}),e._globalOptions.captureUnhandledRejections&&e._attachPromiseRejectionHandler(),e._patchFunctionToString(),e._globalOptions.instrument&&e._globalOptions.instrument.tryCatch&&e._instrumentTryCatch(),e._globalOptions.autoBreadcrumbs&&e._instrumentBreadcrumbs(),e._drainPlugins(),e._isRavenInstalled=!0),Error.stackTraceLimit=e._globalOptions.stackTraceLimit,this},setDSN:function(e){var t=this._parseDSN(e),r=t.path.lastIndexOf("/"),n=t.path.substr(1,r);this._dsn=e,this._globalKey=t.user,this._globalSecret=t.pass&&t.pass.substr(1),this._globalProject=t.path.substr(r+1),this._globalServer=this._getGlobalServer(t),this._globalEndpoint=this._globalServer+"/"+n+"api/"+this._globalProject+"/store/",this._resetBackoff()},context:function(e,t,r){return Ke(e)&&(r=t||[],t=e,e=void 0),this.wrap(e,t).apply(this,r)},wrap:function(e,t,r){var n=this;if(qe(t)&&!Ke(e))return e;if(Ke(e)&&(t=e,e=void 0),!Ke(t))return t;try{if(t.__raven__)return t;if(t.__raven_wrapper__)return t.__raven_wrapper__}catch(e){return t}function i(){var i=[],o=arguments.length,a=!e||e&&!1!==e.deep;for(r&&Ke(r)&&r.apply(this,arguments);o--;)i[o]=a?n.wrap(e,arguments[o]):arguments[o];try{return t.apply(this,i)}catch(t){throw n._ignoreNextOnError(),n.captureException(t,e),t}}for(var o in t)Ze(t,o)&&(i[o]=t[o]);return i.prototype=t.prototype,t.__raven_wrapper__=i,i.__raven__=!0,i.__orig__=t,i},uninstall:function(){return De.report.uninstall(),this._detachPromiseRejectionHandler(),this._unpatchFunctionToString(),this._restoreBuiltIns(),this._restoreConsole(),Error.stackTraceLimit=this._originalErrorStackTraceLimit,this._isRavenInstalled=!1,this},_promiseRejectionHandler:function(e){this._logDebug("debug","Raven caught unhandled promise rejection:",e),this.captureException(e.reason,{extra:{unhandledPromiseRejection:!0}})},_attachPromiseRejectionHandler:function(){return this._promiseRejectionHandler=this._promiseRejectionHandler.bind(this),_t.addEventListener&&_t.addEventListener("unhandledrejection",this._promiseRejectionHandler),this},_detachPromiseRejectionHandler:function(){return _t.removeEventListener&&_t.removeEventListener("unhandledrejection",this._promiseRejectionHandler),this},captureException:function(e,t){if(t=Xe({trimHeadFrames:0},t||{}),ze(e)&&e.error)e=e.error;else if(Le(e))e=e;else{if(!Ne(e))return this.captureMessage(e,Xe(t,{stacktrace:!0,trimHeadFrames:t.trimHeadFrames+1}));t=this._getCaptureExceptionOptionsFromPlainObject(t,e),e=new Error(t.message)}this._lastCapturedException=e;try{var r=De.computeStackTrace(e);this._handleStackInfo(r,t)}catch(t){if(e!==t)throw t}return this},_getCaptureExceptionOptionsFromPlainObject:function(e,t){var r=Object.keys(t).sort(),n=Xe(e,{message:"Non-Error exception captured with keys: "+lt(r),fingerprint:[Me(r)],extra:e.extra||{}});return 
n.extra.__serialized__=ut(t),n},captureMessage:function(e,t){if(!this._globalOptions.ignoreErrors.test||!this._globalOptions.ignoreErrors.test(e)){var r,n=Xe({message:e+=""},t=t||{});try{throw new Error(e)}catch(e){r=e}r.name=null;var i=De.computeStackTrace(r),o=Ve(i.stack)&&i.stack[1];o&&"Raven.captureException"===o.func&&(o=i.stack[2]);var a=o&&o.url||"";if((!this._globalOptions.ignoreUrls.test||!this._globalOptions.ignoreUrls.test(a))&&(!this._globalOptions.whitelistUrls.test||this._globalOptions.whitelistUrls.test(a))){if(this._globalOptions.stacktrace||t&&t.stacktrace){n.fingerprint=null==n.fingerprint?e:n.fingerprint,(t=Xe({trimHeadFrames:0},t)).trimHeadFrames+=1;var s=this._prepareFrames(i,t);n.stacktrace={frames:s.reverse()}}return n.fingerprint&&(n.fingerprint=Ve(n.fingerprint)?n.fingerprint:[n.fingerprint]),this._send(n),this}}},captureBreadcrumb:function(e){var t=Xe({timestamp:gt()/1e3},e);if(Ke(this._globalOptions.breadcrumbCallback)){var r=this._globalOptions.breadcrumbCallback(t);if(Ie(r)&&!Je(r))t=r;else if(!1===r)return this}return this._breadcrumbs.push(t),this._breadcrumbs.length>this._globalOptions.maxBreadcrumbs&&this._breadcrumbs.shift(),this},addPlugin:function(e){var t=[].slice.call(arguments,1);return this._plugins.push([e,t]),this._isRavenInstalled&&this._drainPlugins(),this},setUserContext:function(e){return this._globalContext.user=e,this},setExtraContext:function(e){return this._mergeContext("extra",e),this},setTagsContext:function(e){return this._mergeContext("tags",e),this},clearContext:function(){return this._globalContext={},this},getContext:function(){return JSON.parse(o(this._globalContext))},setEnvironment:function(e){return this._globalOptions.environment=e,this},setRelease:function(e){return this._globalOptions.release=e,this},setDataCallback:function(e){var t=this._globalOptions.dataCallback;return this._globalOptions.dataCallback=bt(t,e),this},setBreadcrumbCallback:function(e){var t=this._globalOptions.breadcrumbCallback;return this._globalOptions.breadcrumbCallback=bt(t,e),this},setShouldSendCallback:function(e){var t=this._globalOptions.shouldSendCallback;return this._globalOptions.shouldSendCallback=bt(t,e),this},setTransport:function(e){return this._globalOptions.transport=e,this},lastException:function(){return this._lastCapturedException},lastEventId:function(){return this._lastEventId},isSetup:function(){return!!this._hasJSON&&(!!this._globalServer||(this.ravenNotConfiguredError||(this.ravenNotConfiguredError=!0,this._logDebug("error","Error: Raven has not been configured.")),!1))},afterLoad:function(){var e=_t.RavenConfig;e&&this.config(e.dsn,e.config).install()},showReportDialog:function(e){if(vt){var t=(e=e||{}).eventId||this.lastEventId();if(!t)throw new Ue("Missing eventId");var r=e.dsn||this._dsn;if(!r)throw new Ue("Missing DSN");var n=encodeURIComponent,i="";i+="?eventId="+n(t),i+="&dsn="+n(r);var o=e.user||this._globalContext.user;o&&(o.name&&(i+="&name="+n(o.name)),o.email&&(i+="&email="+n(o.email)));var a=this._getGlobalServer(this._parseDSN(r)),s=vt.createElement("script");s.async=!0,s.src=a+"/api/embed/error-page/"+i,(vt.head||vt.body).appendChild(s)}},_ignoreNextOnError:function(){var e=this;this._ignoreOnError+=1,setTimeout(function(){e._ignoreOnError-=1})},_triggerEvent:function(e,t){var r,n;if(this._hasDocument){for(n in 
t=t||{},e="raven"+e.substr(0,1).toUpperCase()+e.substr(1),vt.createEvent?(r=vt.createEvent("HTMLEvents")).initEvent(e,!0,!0):(r=vt.createEventObject()).eventType=e,t)Ze(t,n)&&(r[n]=t[n]);if(vt.createEvent)vt.dispatchEvent(r);else try{vt.fireEvent("on"+r.eventType.toLowerCase(),r)}catch(e){}}},_breadcrumbEventHandler:function(e){var t=this;return function(r){if(t._keypressTimeout=null,t._lastCapturedEvent!==r){var n;t._lastCapturedEvent=r;try{n=rt(r.target)}catch(e){n="<unknown>"}t.captureBreadcrumb({category:"ui."+e,message:n})}}},_keypressEventHandler:function(){var e=this;return function(t){var r;try{r=t.target}catch(e){return}var n=r&&r.tagName;if(n&&("INPUT"===n||"TEXTAREA"===n||r.isContentEditable)){var i=e._keypressTimeout;i||e._breadcrumbEventHandler("input")(t),clearTimeout(i),e._keypressTimeout=setTimeout(function(){e._keypressTimeout=null},1e3)}}},_captureUrlChange:function(e,t){var r=ot(this._location.href),n=ot(t),i=ot(e);this._lastHref=t,r.protocol===n.protocol&&r.host===n.host&&(t=n.relative),r.protocol===i.protocol&&r.host===i.host&&(e=i.relative),this.captureBreadcrumb({category:"navigation",data:{to:t,from:e}})},_patchFunctionToString:function(){var e=this;e._originalFunctionToString=Function.prototype.toString,Function.prototype.toString=function(){return"function"==typeof this&&this.__raven__?e._originalFunctionToString.apply(this.__orig__,arguments):e._originalFunctionToString.apply(this,arguments)}},_unpatchFunctionToString:function(){this._originalFunctionToString&&(Function.prototype.toString=this._originalFunctionToString)},_instrumentTryCatch:function(){var e=this,t=e._wrappedBuiltIns;function r(t){return function(r,n){for(var i=new Array(arguments.length),o=0;o<i.length;++o)i[o]=arguments[o];var a=i[0];return Ke(a)&&(i[0]=e.wrap(a)),t.apply?t.apply(this,i):t(i[0],i[1])}}var n=this._globalOptions.autoBreadcrumbs;function i(r){var i=_t[r]&&_t[r].prototype;i&&i.hasOwnProperty&&i.hasOwnProperty("addEventListener")&&(at(i,"addEventListener",function(t){return function(i,o,a,s){try{o&&o.handleEvent&&(o.handleEvent=e.wrap(o.handleEvent))}catch(e){}var c,l,u;return n&&n.dom&&("EventTarget"===r||"Node"===r)&&(l=e._breadcrumbEventHandler("click"),u=e._keypressEventHandler(),c=function(e){if(e){var t;try{t=e.type}catch(e){return}return"click"===t?l(e):"keypress"===t?u(e):void 0}}),t.call(this,i,e.wrap(o,void 0,c),a,s)}},t),at(i,"removeEventListener",function(e){return function(t,r,n,i){try{r=r&&(r.__raven_wrapper__?r.__raven_wrapper__:r)}catch(e){}return e.call(this,t,r,n,i)}},t))}at(_t,"setTimeout",r,t),at(_t,"setInterval",r,t),_t.requestAnimationFrame&&at(_t,"requestAnimationFrame",function(t){return function(r){return t(e.wrap(r))}},t);for(var o=["EventTarget","Window","Node","ApplicationCache","AudioTrackList","ChannelMergerNode","CryptoOperation","EventSource","FileReader","HTMLUnknownElement","IDBDatabase","IDBRequest","IDBTransaction","KeyOperation","MediaController","MessagePort","ModalWindow","Notification","SVGElementInstance","Screen","TextTrack","TextTrackCue","TextTrackList","WebSocket","WebSocketWorker","Worker","XMLHttpRequest","XMLHttpRequestEventTarget","XMLHttpRequestUpload"],a=0;a<o.length;a++)i(o[a])},_instrumentBreadcrumbs:function(){var e=this,t=this._globalOptions.autoBreadcrumbs,r=e._wrappedBuiltIns;function n(t,r){t in r&&Ke(r[t])&&at(r,t,function(t){return e.wrap(t)})}if(t.xhr&&"XMLHttpRequest"in _t){var i=_t.XMLHttpRequest&&_t.XMLHttpRequest.prototype;at(i,"open",function(t){return function(r,n){return 
We(n)&&-1===n.indexOf(e._globalKey)&&(this.__raven_xhr={method:r,url:n,status_code:null}),t.apply(this,arguments)}},r),at(i,"send",function(t){return function(){var r=this;function i(){if(r.__raven_xhr&&4===r.readyState){try{r.__raven_xhr.status_code=r.status}catch(e){}e.captureBreadcrumb({type:"http",category:"xhr",data:r.__raven_xhr})}}for(var o=["onload","onerror","onprogress"],a=0;a<o.length;a++)n(o[a],r);return"onreadystatechange"in r&&Ke(r.onreadystatechange)?at(r,"onreadystatechange",function(t){return e.wrap(t,void 0,i)}):r.onreadystatechange=i,t.apply(this,arguments)}},r)}t.xhr&&st()&&at(_t,"fetch",function(t){return function(){for(var r=new Array(arguments.length),n=0;n<r.length;++n)r[n]=arguments[n];var i,o=r[0],a="GET";if("string"==typeof o?i=o:"Request"in _t&&o instanceof _t.Request?(i=o.url,o.method&&(a=o.method)):i=""+o,-1!==i.indexOf(e._globalKey))return t.apply(this,r);r[1]&&r[1].method&&(a=r[1].method);var s={method:a,url:i,status_code:null};return t.apply(this,r).then(function(t){return s.status_code=t.status,e.captureBreadcrumb({type:"http",category:"fetch",data:s}),t}).catch(function(t){throw e.captureBreadcrumb({type:"http",category:"fetch",data:s,level:"error"}),t})}},r),t.dom&&this._hasDocument&&(vt.addEventListener?(vt.addEventListener("click",e._breadcrumbEventHandler("click"),!1),vt.addEventListener("keypress",e._keypressEventHandler(),!1)):vt.attachEvent&&(vt.attachEvent("onclick",e._breadcrumbEventHandler("click")),vt.attachEvent("onkeypress",e._keypressEventHandler())));var o=_t.chrome,a=!(o&&o.app&&o.app.runtime)&&_t.history&&history.pushState&&history.replaceState;if(t.location&&a){var s=_t.onpopstate;_t.onpopstate=function(){var t=e._location.href;if(e._captureUrlChange(e._lastHref,t),s)return s.apply(this,arguments)};var c=function(t){return function(){var r=arguments.length>2?arguments[2]:void 0;return r&&e._captureUrlChange(e._lastHref,r+""),t.apply(this,arguments)}};at(history,"pushState",c,r),at(history,"replaceState",c,r)}if(t.console&&"console"in _t&&console.log){var l=function(t,r){e.captureBreadcrumb({message:t,level:r.level,category:"console"})};$e(["debug","info","warn","error","log"],function(e,t){ht(console,t,l)})}},_restoreBuiltIns:function(){for(var e;this._wrappedBuiltIns.length;){var t=(e=this._wrappedBuiltIns.shift())[0],r=e[1],n=e[2];t[r]=n}},_restoreConsole:function(){for(var e in this._originalConsoleMethods)this._originalConsole[e]=this._originalConsoleMethods[e]},_drainPlugins:function(){var e=this;$e(this._plugins,function(t,r){var n=r[0],i=r[1];n.apply(e,[e].concat(i))})},_parseDSN:function(e){var t=dt.exec(e),r={},n=7;try{for(;n--;)r[ft[n]]=t[n]||""}catch(t){throw new Ue("Invalid DSN: "+e)}if(r.pass&&!this._globalOptions.allowSecretKey)throw new Ue("Do not specify your secret key in the DSN. 
See: http://bit.ly/raven-secret-key");return r},_getGlobalServer:function(e){var t="//"+e.host+(e.port?":"+e.port:"");return e.protocol&&(t=e.protocol+":"+t),t},_handleOnErrorStackInfo:function(){this._ignoreOnError||this._handleStackInfo.apply(this,arguments)},_handleStackInfo:function(e,t){var r=this._prepareFrames(e,t);this._triggerEvent("handle",{stackInfo:e,options:t}),this._processException(e.name,e.message,e.url,e.lineno,r,t)},_prepareFrames:function(e,t){var r=this,n=[];if(e.stack&&e.stack.length&&($e(e.stack,function(t,i){var o=r._normalizeFrame(i,e.url);o&&n.push(o)}),t&&t.trimHeadFrames))for(var i=0;i<t.trimHeadFrames&&i<n.length;i++)n[i].in_app=!1;return n=n.slice(0,this._globalOptions.stackTraceLimit)},_normalizeFrame:function(e,t){var r={filename:e.url,lineno:e.line,colno:e.column,function:e.func||"?"};return e.url||(r.filename=t),r.in_app=!(this._globalOptions.includePaths.test&&!this._globalOptions.includePaths.test(r.filename)||/(Raven|TraceKit)\./.test(r.function)||/raven\.(min\.)?js$/.test(r.filename)),r},_processException:function(e,t,r,n,i,o){var a,s=(e?e+": ":"")+(t||"");if((!this._globalOptions.ignoreErrors.test||!this._globalOptions.ignoreErrors.test(t)&&!this._globalOptions.ignoreErrors.test(s))&&(i&&i.length?(r=i[0].filename||r,i.reverse(),a={frames:i}):r&&(a={frames:[{filename:r,lineno:n,in_app:!0}]}),(!this._globalOptions.ignoreUrls.test||!this._globalOptions.ignoreUrls.test(r))&&(!this._globalOptions.whitelistUrls.test||this._globalOptions.whitelistUrls.test(r)))){var c=Xe({exception:{values:[{type:e,value:t,stacktrace:a}]},culprit:r},o);this._send(c)}},_trimPacket:function(e){var t=this._globalOptions.maxMessageLength;if(e.message&&(e.message=Ge(e.message,t)),e.exception){var r=e.exception.values[0];r.value=Ge(r.value,t)}var n=e.request;return n&&(n.url&&(n.url=Ge(n.url,this._globalOptions.maxUrlLength)),n.Referer&&(n.Referer=Ge(n.Referer,this._globalOptions.maxUrlLength))),e.breadcrumbs&&e.breadcrumbs.values&&this._trimBreadcrumbs(e.breadcrumbs),e},_trimBreadcrumbs:function(e){for(var t,r,n,i=["to","from","url"],o=0;o<e.values.length;++o)if((r=e.values[o]).hasOwnProperty("data")&&Ie(r.data)&&!Ye(r.data)){n=Xe({},r.data);for(var a=0;a<i.length;++a)t=i[a],n.hasOwnProperty(t)&&n[t]&&(n[t]=Ge(n[t],this._globalOptions.maxUrlLength));e.values[o].data=n}},_getHttpData:function(){if(this._hasNavigator||this._hasDocument){var e={};return this._hasNavigator&&mt.userAgent&&(e.headers={"User-Agent":navigator.userAgent}),_t.location&&_t.location.href&&(e.url=_t.location.href),this._hasDocument&&vt.referrer&&(e.headers||(e.headers={}),e.headers.Referer=vt.referrer),e}},_resetBackoff:function(){this._backoffDuration=0,this._backoffStart=null},_shouldBackoff:function(){return this._backoffDuration&>()-this._backoffStart<this._backoffDuration},_isRepeatData:function(e){var t=this._lastData;return!(!t||e.message!==t.message||e.culprit!==t.culprit)&&(e.stacktrace||t.stacktrace?it(e.stacktrace,t.stacktrace):!e.exception&&!t.exception||nt(e.exception,t.exception))},_setBackoffState:function(e){if(!this._shouldBackoff()){var t=e.status;if(400===t||401===t||429===t){var r;try{r=st()?e.headers.get("Retry-After"):e.getResponseHeader("Retry-After"),r=1e3*parseInt(r,10)}catch(e){}this._backoffDuration=r||(2*this._backoffDuration||1e3),this._backoffStart=gt()}}},_send:function(e){var t=this._globalOptions,r={project:this._globalProject,logger:t.logger,platform:"javascript"},n=this._getHttpData();n&&(r.request=n),e.trimHeadFrames&&delete 
e.trimHeadFrames,(e=Xe(r,e)).tags=Xe(Xe({},this._globalContext.tags),e.tags),e.extra=Xe(Xe({},this._globalContext.extra),e.extra),e.extra["session:duration"]=gt()-this._startTime,this._breadcrumbs&&this._breadcrumbs.length>0&&(e.breadcrumbs={values:[].slice.call(this._breadcrumbs,0)}),this._globalContext.user&&(e.user=this._globalContext.user),t.environment&&(e.environment=t.environment),t.release&&(e.release=t.release),t.serverName&&(e.server_name=t.serverName),e=this._sanitizeData(e),Object.keys(e).forEach(function(t){(null==e[t]||""===e[t]||Je(e[t]))&&delete e[t]}),Ke(t.dataCallback)&&(e=t.dataCallback(e)||e),e&&!Je(e)&&(Ke(t.shouldSendCallback)&&!t.shouldSendCallback(e)||(this._shouldBackoff()?this._logDebug("warn","Raven dropped error due to backoff: ",e):"number"==typeof t.sampleRate?Math.random()<t.sampleRate&&this._sendProcessedPayload(e):this._sendProcessedPayload(e)))},_sanitizeData:function(e){return pt(e,this._globalOptions.sanitizeKeys)},_getUuid:function(){return tt()},_sendProcessedPayload:function(e,t){var r=this,n=this._globalOptions;if(this.isSetup())if(e=this._trimPacket(e),this._globalOptions.allowDuplicates||!this._isRepeatData(e)){this._lastEventId=e.event_id||(e.event_id=this._getUuid()),this._lastData=e,this._logDebug("debug","Raven about to send:",e);var i={sentry_version:"7",sentry_client:"raven-js/"+this.VERSION,sentry_key:this._globalKey};this._globalSecret&&(i.sentry_secret=this._globalSecret);var o=e.exception&&e.exception.values[0];this._globalOptions.autoBreadcrumbs&&this._globalOptions.autoBreadcrumbs.sentry&&this.captureBreadcrumb({category:"sentry",message:o?(o.type?o.type+": ":"")+o.value:e.message,event_id:e.event_id,level:e.level||"error"});var a=this._globalEndpoint;(n.transport||this._makeRequest).call(this,{url:a,auth:i,data:e,options:n,onSuccess:function(){r._resetBackoff(),r._triggerEvent("success",{data:e,src:a}),t&&t()},onError:function(n){r._logDebug("error","Raven transport failed to send: ",n),n.request&&r._setBackoffState(n.request),r._triggerEvent("failure",{data:e,src:a}),n=n||new Error("Raven send failed (no additional details provided)"),t&&t(n)}})}else this._logDebug("warn","Raven dropped repeat event: ",e)},_makeRequest:function(e){var t=e.url+"?"+et(e.auth),r=null,n={};if(e.options.headers&&(r=this._evaluateHash(e.options.headers)),e.options.fetchParameters&&(n=this._evaluateHash(e.options.fetchParameters)),st()){n.body=o(e.data);var i=Xe({},this._fetchDefaults),a=Xe(i,n);return r&&(a.headers=r),_t.fetch(t,a).then(function(t){if(t.ok)e.onSuccess&&e.onSuccess();else{var r=new Error("Sentry error code: "+t.status);r.request=t,e.onError&&e.onError(r)}}).catch(function(){e.onError&&e.onError(new Error("Sentry error code: network unavailable"))})}var s=_t.XMLHttpRequest&&new _t.XMLHttpRequest;s&&(("withCredentials"in s||"undefined"!=typeof XDomainRequest)&&("withCredentials"in s?s.onreadystatechange=function(){if(4===s.readyState)if(200===s.status)e.onSuccess&&e.onSuccess();else if(e.onError){var t=new Error("Sentry error code: "+s.status);t.request=s,e.onError(t)}}:(s=new XDomainRequest,t=t.replace(/^https?:/,""),e.onSuccess&&(s.onload=e.onSuccess),e.onError&&(s.onerror=function(){var t=new Error("Sentry error code: XDomainRequest");t.request=s,e.onError(t)})),s.open("POST",t),r&&$e(r,function(e,t){s.setRequestHeader(e,t)}),s.send(o(e.data))))},_evaluateHash:function(e){var t={};for(var r in e)if(e.hasOwnProperty(r)){var n=e[r];t[r]="function"==typeof n?n():n}return 
t},_logDebug:function(e){this._originalConsoleMethods[e]&&(this.debug||this._globalOptions.debug)&&Function.prototype.apply.call(this._originalConsoleMethods[e],this._originalConsole,[].slice.call(arguments,1))},_mergeContext:function(e,t){qe(t)?delete this._globalContext[e]:this._globalContext[e]=Xe(this._globalContext[e]||{},t)}},yt.prototype.setUser=yt.prototype.setUserContext,yt.prototype.setReleaseContext=yt.prototype.setRelease;var Et=yt,wt=Object.freeze({default:Et,__moduleExports:Et}),xt=wt&&Et||wt,kt="undefined"!=typeof window?window:void 0!==e?e:"undefined"!=typeof self?self:{},St=kt.Raven,Ot=new xt;Ot.noConflict=function(){return kt.Raven=St,Ot},Ot.afterLoad();var jt,Ct,Rt,Tt,Ft,At,Bt,Pt=Ot,Dt=xt;Pt.Client=Dt,(Bt="plyr.io"===window.location.host)&&Pt.config("https://[email protected]/305555").install(),document.addEventListener("DOMContentLoaded",function(){Pt.context(function(){window.shr&&window.shr.setup({count:{classname:"button__count"}}),document.addEventListener("focusout",function(e){e.target.classList.remove("tab-focus")}),document.addEventListener("keydown",function(e){9===e.keyCode&&setTimeout(function(){document.activeElement.classList.add("tab-focus")},0)});var e=new Plyr("#player",{debug:!0,title:"View From A Blue Moon",iconUrl:"../dist/plyr.svg",keyboard:{global:!0},tooltips:{controls:!0},captions:{active:!0},keys:{google:"AIzaSyDrNwtN3nLH_8rjCmu5Wq3ZCm4MNAVdc0c"},ads:{enabled:!0,publisherId:"918848828995742"}});window.player=e;var t=document.querySelectorAll("[data-source]"),r={video:"video",audio:"audio",youtube:"youtube",vimeo:"vimeo"},n=window.location.hash.replace("#",""),i=window.history&&window.history.pushState;function o(e,t,r){e&&e.classList[r?"add":"remove"](t)}function a(i,a){if(i in r&&(a||i!==n)&&(n.length||i!==r.video)){switch(i){case r.video:e.source={type:"video",title:"View From A Blue Moon",sources:[{src:"https://cdn.plyr.io/static/demo/View_From_A_Blue_Moon_Trailer-576p.mp4",type:"video/mp4",size:576},{src:"https://cdn.plyr.io/static/demo/View_From_A_Blue_Moon_Trailer-720p.mp4",type:"video/mp4",size:720},{src:"https://cdn.plyr.io/static/demo/View_From_A_Blue_Moon_Trailer-1080p.mp4",type:"video/mp4",size:1080},{src:"https://cdn.plyr.io/static/demo/View_From_A_Blue_Moon_Trailer-1440p.mp4",type:"video/mp4",size:1440}],poster:"https://cdn.plyr.io/static/demo/View_From_A_Blue_Moon_Trailer-HD.jpg",tracks:[{kind:"captions",label:"English",srclang:"en",src:"https://cdn.plyr.io/static/demo/View_From_A_Blue_Moon_Trailer-HD.en.vtt",default:!0},{kind:"captions",label:"French",srclang:"fr",src:"https://cdn.plyr.io/static/demo/View_From_A_Blue_Moon_Trailer-HD.fr.vtt"}]};break;case r.audio:e.source={type:"audio",title:"Kishi Bashi – “It All Began With A Burst”",sources:[{src:"https://cdn.plyr.io/static/demo/Kishi_Bashi_-_It_All_Began_With_a_Burst.mp3",type:"audio/mp3"},{src:"https://cdn.plyr.io/static/demo/Kishi_Bashi_-_It_All_Began_With_a_Burst.ogg",type:"audio/ogg"}]};break;case r.youtube:e.source={type:"video",sources:[{src:"https://youtube.com/watch?v=bTqVqk7FSmY",provider:"youtube"}]};break;case r.vimeo:e.source={type:"video",sources:[{src:"https://vimeo.com/76979871",provider:"vimeo"}]}}n=i,Array.from(t).forEach(function(e){return 
o(e.parentElement,"active",!1)}),o(document.querySelector('[data-source="'+i+'"]'),"active",!0),Array.from(document.querySelectorAll(".plyr__cite")).forEach(function(e){e.setAttribute("hidden","")}),document.querySelector(".plyr__cite--"+i).removeAttribute("hidden")}}if(Array.from(t).forEach(function(e){e.addEventListener("click",function(){var t=e.getAttribute("data-source");a(t),i&&window.history.pushState({type:t},"","#"+t)})}),window.addEventListener("popstate",function(e){e.state&&"type"in e.state&&a(e.state.type)}),i){var s=!n.length;s&&(n=r.video),n in r&&window.history.replaceState({type:n},"",s?"":"#"+n),n!==r.video&&a(n,!0)}})}),Bt&&(jt=window,Ct=document,Rt="script",Tt="ga",jt.GoogleAnalyticsObject=Tt,jt.ga=jt.ga||function(){(jt.ga.q=jt.ga.q||[]).push(arguments)},jt.ga.l=1*new Date,Ft=Ct.createElement(Rt),At=Ct.getElementsByTagName(Rt)[0],Ft.async=1,Ft.src="https://www.google-analytics.com/analytics.js",At.parentNode.insertBefore(Ft,At),window.ga("create","UA-40881672-11","auto"),window.ga("send","pageview"))}();
//# sourceMappingURL=demo.min.js.map
|
v
|
registry.rs
|
use super::{Code, Codec, Protocol};
use crate::codec::StdCodec;
use crate::proto::{DnsAddr, Ockam, Tcp};
use alloc::collections::btree_map::BTreeMap;
use alloc::sync::Arc;
use core::fmt;
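/// Shared, cheaply clonable mapping from protocol codes and string prefixes to their codecs.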
#[derive(Clone)]
pub struct Registry {
inner: Arc<RegistryImpl>,
}
struct RegistryImpl {
bytes: BTreeMap<Code, Arc<dyn Codec>>,
strings: BTreeMap<&'static str, Arc<dyn Codec>>,
}
impl fmt::Debug for Registry {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Registry")
}
}
impl Default for Registry {
fn default() -> Self {
let std_codec = Arc::new(StdCodec);
let mut r = RegistryBuilder::new();
r.register(Tcp::CODE, Tcp::PREFIX, std_codec.clone());
r.register(DnsAddr::CODE, DnsAddr::PREFIX, std_codec.clone());
#[allow(clippy::redundant_clone)]
r.register(Ockam::CODE, Ockam::PREFIX, std_codec.clone());
#[cfg(feature = "std")]
r.register(
crate::proto::Ip4::CODE,
crate::proto::Ip4::PREFIX,
std_codec.clone(),
)
.register(
crate::proto::Ip6::CODE,
crate::proto::Ip6::PREFIX,
std_codec,
);
r.finish()
}
}
impl Registry {
pub fn get_by_code(&self, code: Code) -> Option<Arc<dyn Codec>> {
self.inner.bytes.get(&code).cloned()
}
pub fn get_by_prefix(&self, prefix: &str) -> Option<Arc<dyn Codec>> {
self.inner.strings.get(prefix).cloned()
}
pub fn codes(&self) -> impl Iterator<Item = Code> + '_ {
self.inner.bytes.keys().copied()
}
pub fn prefixes(&self) -> impl Iterator<Item = &str> + '_ {
self.inner.strings.keys().copied()
}
}
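/// Builder that accumulates codec registrations before freezing them into a `Registry`.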
pub struct RegistryBuilder(RegistryImpl);
impl Default for RegistryBuilder {
fn default() -> Self {
RegistryBuilder::new()
}
}
impl RegistryBuilder {
pub fn new() -> Self {
RegistryBuilder(RegistryImpl {
bytes: BTreeMap::new(),
strings: BTreeMap::new(),
})
}
pub fn has_code(&self, c: Code) -> bool {
self.0.bytes.contains_key(&c)
}
pub fn
|
(&self, prefix: &str) -> bool {
self.0.strings.contains_key(prefix)
}
pub fn register<T>(&mut self, code: Code, prefix: &'static str, codec: Arc<T>) -> &mut Self
where
T: Codec + 'static,
{
self.0.bytes.insert(code, codec.clone());
self.0.strings.insert(prefix, codec);
self
}
pub fn finish(self) -> Registry {
Registry {
inner: Arc::new(self.0),
}
}
}
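// Editorial sketch, not part of the original registry.rs: a minimal example of how a
// caller might drive the builder above. It only uses items already visible in this
// file (RegistryBuilder, StdCodec, Tcp's CODE/PREFIX constants); the module and test
// names are illustrative.
#[cfg(test)]
mod usage_sketch {
    use super::*;
    use crate::codec::StdCodec;
    use crate::proto::Tcp;
    use alloc::sync::Arc;

    #[test]
    fn register_and_lookup() {
        // Register the standard codec for TCP under both its numeric code and its string prefix.
        let mut builder = RegistryBuilder::new();
        builder.register(Tcp::CODE, Tcp::PREFIX, Arc::new(StdCodec));
        let registry = builder.finish();

        // Both lookup tables resolve to the same underlying codec.
        assert!(registry.get_by_code(Tcp::CODE).is_some());
        assert!(registry.get_by_prefix(Tcp::PREFIX).is_some());
        assert_eq!(registry.codes().count(), 1);
    }
}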
|
has_prefix
|
event.rs
|
// Generated from definition io.k8s.api.events.v1.Event
/// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.
#[derive(Clone, Debug, PartialEq)]
pub struct Event {
/// action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field can have at most 128 characters.
pub action: Option<String>,
/// deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.
pub deprecated_count: Option<i32>,
/// deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
pub deprecated_first_timestamp: Option<crate::apimachinery::pkg::apis::meta::v1::Time>,
/// deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
pub deprecated_last_timestamp: Option<crate::apimachinery::pkg::apis::meta::v1::Time>,
/// deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.
pub deprecated_source: Option<crate::api::core::v1::EventSource>,
/// eventTime is the time when this Event was first observed. It is required.
pub event_time: crate::apimachinery::pkg::apis::meta::v1::MicroTime,
pub metadata: crate::apimachinery::pkg::apis::meta::v1::ObjectMeta,
/// note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.
pub note: Option<String>,
/// reason is why the action was taken. It is human-readable. This field can have at most 128 characters.
pub reason: Option<String>,
/// regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.
pub regarding: Option<crate::api::core::v1::ObjectReference>,
/// related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.
pub related: Option<crate::api::core::v1::ObjectReference>,
/// reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.
pub reporting_controller: Option<String>,
/// reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.
pub reporting_instance: Option<String>,
/// series is data about the Event series this event represents or nil if it's a singleton Event.
pub series: Option<crate::api::events::v1::EventSeries>,
/// type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable.
pub type_: Option<String>,
}
// Begin events.k8s.io/v1/Event
// Generated from operation createEventsV1NamespacedEvent
impl Event {
/// create an Event
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::CreateResponse`]`<Self>>` constructor, or [`crate::CreateResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn create_namespaced_event(
namespace: &str,
body: &crate::api::events::v1::Event,
optional: crate::CreateOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::CreateResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/events.k8s.io/v1/namespaces/{namespace}/events?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::post(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
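// Editorial sketch, not part of the generated file: `create_namespaced_event` only
// builds an `http::Request`; executing it and parsing the response via the returned
// constructor are left to the caller. The helper name and the "default" namespace
// below are illustrative.
#[cfg(feature = "api")]
fn build_create_event_request(
    event: &crate::api::events::v1::Event,
) -> Result<crate::http::Request<Vec<u8>>, crate::RequestError> {
    // `CreateOptional::default()` sends no optional query parameters (see the doc comment above).
    let (request, _parse_response) = crate::api::events::v1::Event::create_namespaced_event(
        "default",
        event,
        crate::CreateOptional::default(),
    )?;
    // `request` is a ready-to-send POST with a JSON body; after executing it with any HTTP
    // client, pass the response's status code to `_parse_response` to obtain the
    // `ResponseBody<CreateResponse<Event>>` parser mentioned in the doc comment.
    Ok(request)
}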
// Generated from operation deleteEventsV1CollectionNamespacedEvent
impl Event {
/// delete collection of Event
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>>` constructor, or [`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `delete_optional`
///
/// Delete options. Use `Default::default()` to not pass any.
///
/// * `list_optional`
///
/// List options. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_collection_namespaced_event(
namespace: &str,
delete_optional: crate::DeleteOptional<'_>,
list_optional: crate::ListOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<crate::List<Self>>>), crate::RequestError> {
let __url = format!("/apis/events.k8s.io/v1/namespaces/{namespace}/events?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
list_optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::delete(__url);
let __body = crate::serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation deleteEventsV1NamespacedEvent
impl Event {
/// delete an Event
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<Self>>` constructor, or [`crate::DeleteResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the Event
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_namespaced_event(
name: &str,
namespace: &str,
optional: crate::DeleteOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let __request = crate::http::Request::delete(__url);
let __body = crate::serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation listEventsV1EventForAllNamespaces
impl Event {
/// list or watch objects of kind Event
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ListResponse`]`<Self>>` constructor, or [`crate::ListResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list_event_for_all_namespaces(
optional: crate::ListOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ListResponse<Self>>), crate::RequestError> {
let __url = "/apis/events.k8s.io/v1/events?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation listEventsV1NamespacedEvent
impl Event {
/// list or watch objects of kind Event
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ListResponse`]`<Self>>` constructor, or [`crate::ListResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list_namespaced_event(
namespace: &str,
optional: crate::ListOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ListResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/events.k8s.io/v1/namespaces/{namespace}/events?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation patchEventsV1NamespacedEvent
impl Event {
/// partially update the specified Event
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::PatchResponse`]`<Self>>` constructor, or [`crate::PatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the Event
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn patch_namespaced_event(
name: &str,
namespace: &str,
body: &crate::apimachinery::pkg::apis::meta::v1::Patch,
optional: crate::PatchOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::PatchResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::patch(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static(match body {
crate::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
}));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation readEventsV1NamespacedEvent
impl Event {
/// read the specified Event
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReadNamespacedEventResponse`]`>` constructor, or [`ReadNamespacedEventResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the Event
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn read_namespaced_event(
name: &str,
namespace: &str,
optional: ReadNamespacedEventOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<ReadNamespacedEventResponse>), crate::RequestError> {
let ReadNamespacedEventOptional {
exact,
export,
pretty,
} = optional;
let __url = format!("/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(exact) = exact {
__query_pairs.append_pair("exact", &exact.to_string());
}
if let Some(export) = export {
__query_pairs.append_pair("export", &export.to_string());
}
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`Event::read_namespaced_event`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadNamespacedEventOptional<'a> {
/// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
pub exact: Option<bool>,
/// Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
pub export: Option<bool>,
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReadNamespacedEventResponse as Response>::try_from_parts` to parse the HTTP response body of [`Event::read_namespaced_event`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadNamespacedEventResponse {
Ok(crate::api::events::v1::Event),
Other(Result<Option<crate::serde_json::Value>, crate::serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadNamespacedEventResponse {
fn try_from_parts(status_code: crate::http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
crate::http::StatusCode::OK => {
let result = match crate::serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReadNamespacedEventResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match crate::serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReadNamespacedEventResponse::Other(result), read))
},
}
}
}
// Generated from operation replaceEventsV1NamespacedEvent
impl Event {
/// replace the specified Event
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ReplaceResponse`]`<Self>>` constructor, or [`crate::ReplaceResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the Event
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn replace_namespaced_event(
name: &str,
namespace: &str,
body: &crate::api::events::v1::Event,
optional: crate::ReplaceOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ReplaceResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::put(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation watchEventsV1EventForAllNamespaces
impl Event {
/// list or watch objects of kind Event
///
/// This operation only supports watching one item, or a list of items, of this type for changes.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::WatchResponse`]`<Self>>` constructor, or [`crate::WatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn watch_event_for_all_namespaces(
optional: crate::WatchOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::WatchResponse<Self>>), crate::RequestError> {
let __url = "/apis/events.k8s.io/v1/events?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation watchEventsV1NamespacedEvent
impl Event {
/// list or watch objects of kind Event
///
/// This operation only supports watching one item, or a list of items, of this type for changes.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::WatchResponse`]`<Self>>` constructor, or [`crate::WatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn watch_namespaced_event(
namespace: &str,
optional: crate::WatchOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::WatchResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/events.k8s.io/v1/namespaces/{namespace}/events?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// End events.k8s.io/v1/Event
impl crate::Resource for Event {
const API_VERSION: &'static str = "events.k8s.io/v1";
const GROUP: &'static str = "events.k8s.io";
const KIND: &'static str = "Event";
const VERSION: &'static str = "v1";
}
impl crate::ListableResource for Event {
const LIST_KIND: &'static str = concat!("Event", "List");
}
impl crate::Metadata for Event {
type Ty = crate::apimachinery::pkg::apis::meta::v1::ObjectMeta;
fn metadata(&self) -> &<Self as crate::Metadata>::Ty {
&self.metadata
}
fn metadata_mut(&mut self) -> &mut<Self as crate::Metadata>::Ty {
&mut self.metadata
}
}
impl<'de> crate::serde::Deserialize<'de> for Event {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_api_version,
Key_kind,
Key_action,
Key_deprecated_count,
Key_deprecated_first_timestamp,
Key_deprecated_last_timestamp,
Key_deprecated_source,
Key_event_time,
Key_metadata,
Key_note,
Key_reason,
Key_regarding,
Key_related,
Key_reporting_controller,
Key_reporting_instance,
Key_series,
Key_type_,
Other,
}
impl<'de> crate::serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
Ok(match v {
"apiVersion" => Field::Key_api_version,
"kind" => Field::Key_kind,
"action" => Field::Key_action,
"deprecatedCount" => Field::Key_deprecated_count,
"deprecatedFirstTimestamp" => Field::Key_deprecated_first_timestamp,
"deprecatedLastTimestamp" => Field::Key_deprecated_last_timestamp,
"deprecatedSource" => Field::Key_deprecated_source,
"eventTime" => Field::Key_event_time,
"metadata" => Field::Key_metadata,
"note" => Field::Key_note,
"reason" => Field::Key_reason,
"regarding" => Field::Key_regarding,
"related" => Field::Key_related,
"reportingController" => Field::Key_reporting_controller,
"reportingInstance" => Field::Key_reporting_instance,
"series" => Field::Key_series,
"type" => Field::Key_type_,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = Event;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result
|
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
let mut value_action: Option<String> = None;
let mut value_deprecated_count: Option<i32> = None;
let mut value_deprecated_first_timestamp: Option<crate::apimachinery::pkg::apis::meta::v1::Time> = None;
let mut value_deprecated_last_timestamp: Option<crate::apimachinery::pkg::apis::meta::v1::Time> = None;
let mut value_deprecated_source: Option<crate::api::core::v1::EventSource> = None;
let mut value_event_time: Option<crate::apimachinery::pkg::apis::meta::v1::MicroTime> = None;
let mut value_metadata: Option<crate::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None;
let mut value_note: Option<String> = None;
let mut value_reason: Option<String> = None;
let mut value_regarding: Option<crate::api::core::v1::ObjectReference> = None;
let mut value_related: Option<crate::api::core::v1::ObjectReference> = None;
let mut value_reporting_controller: Option<String> = None;
let mut value_reporting_instance: Option<String> = None;
let mut value_series: Option<crate::api::events::v1::EventSeries> = None;
let mut value_type_: Option<String> = None;
while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_api_version => {
let value_api_version: String = crate::serde::de::MapAccess::next_value(&mut map)?;
if value_api_version != <Self::Value as crate::Resource>::API_VERSION {
return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::API_VERSION));
}
},
Field::Key_kind => {
let value_kind: String = crate::serde::de::MapAccess::next_value(&mut map)?;
if value_kind != <Self::Value as crate::Resource>::KIND {
return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::KIND));
}
},
Field::Key_action => value_action = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_deprecated_count => value_deprecated_count = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_deprecated_first_timestamp => value_deprecated_first_timestamp = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_deprecated_last_timestamp => value_deprecated_last_timestamp = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_deprecated_source => value_deprecated_source = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_event_time => value_event_time = Some(crate::serde::de::MapAccess::next_value(&mut map)?),
Field::Key_metadata => value_metadata = Some(crate::serde::de::MapAccess::next_value(&mut map)?),
Field::Key_note => value_note = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_reason => value_reason = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_regarding => value_regarding = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_related => value_related = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_reporting_controller => value_reporting_controller = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_reporting_instance => value_reporting_instance = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_series => value_series = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_type_ => value_type_ = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(Event {
action: value_action,
deprecated_count: value_deprecated_count,
deprecated_first_timestamp: value_deprecated_first_timestamp,
deprecated_last_timestamp: value_deprecated_last_timestamp,
deprecated_source: value_deprecated_source,
event_time: value_event_time.ok_or_else(|| crate::serde::de::Error::missing_field("eventTime"))?,
metadata: value_metadata.ok_or_else(|| crate::serde::de::Error::missing_field("metadata"))?,
note: value_note,
reason: value_reason,
regarding: value_regarding,
related: value_related,
reporting_controller: value_reporting_controller,
reporting_instance: value_reporting_instance,
series: value_series,
type_: value_type_,
})
}
}
deserializer.deserialize_struct(
<Self as crate::Resource>::KIND,
&[
"apiVersion",
"kind",
"action",
"deprecatedCount",
"deprecatedFirstTimestamp",
"deprecatedLastTimestamp",
"deprecatedSource",
"eventTime",
"metadata",
"note",
"reason",
"regarding",
"related",
"reportingController",
"reportingInstance",
"series",
"type",
],
Visitor,
)
}
}
impl crate::serde::Serialize for Event {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
let mut state = serializer.serialize_struct(
<Self as crate::Resource>::KIND,
4 +
self.action.as_ref().map_or(0, |_| 1) +
self.deprecated_count.as_ref().map_or(0, |_| 1) +
self.deprecated_first_timestamp.as_ref().map_or(0, |_| 1) +
self.deprecated_last_timestamp.as_ref().map_or(0, |_| 1) +
self.deprecated_source.as_ref().map_or(0, |_| 1) +
self.note.as_ref().map_or(0, |_| 1) +
self.reason.as_ref().map_or(0, |_| 1) +
self.regarding.as_ref().map_or(0, |_| 1) +
self.related.as_ref().map_or(0, |_| 1) +
self.reporting_controller.as_ref().map_or(0, |_| 1) +
self.reporting_instance.as_ref().map_or(0, |_| 1) +
self.series.as_ref().map_or(0, |_| 1) +
self.type_.as_ref().map_or(0, |_| 1),
)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::API_VERSION)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::KIND)?;
if let Some(value) = &self.action {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "action", value)?;
}
if let Some(value) = &self.deprecated_count {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "deprecatedCount", value)?;
}
if let Some(value) = &self.deprecated_first_timestamp {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "deprecatedFirstTimestamp", value)?;
}
if let Some(value) = &self.deprecated_last_timestamp {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "deprecatedLastTimestamp", value)?;
}
if let Some(value) = &self.deprecated_source {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "deprecatedSource", value)?;
}
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "eventTime", &self.event_time)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", &self.metadata)?;
if let Some(value) = &self.note {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "note", value)?;
}
if let Some(value) = &self.reason {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "reason", value)?;
}
if let Some(value) = &self.regarding {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "regarding", value)?;
}
if let Some(value) = &self.related {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "related", value)?;
}
if let Some(value) = &self.reporting_controller {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "reportingController", value)?;
}
if let Some(value) = &self.reporting_instance {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "reportingInstance", value)?;
}
if let Some(value) = &self.series {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "series", value)?;
}
if let Some(value) = &self.type_ {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "type", value)?;
}
crate::serde::ser::SerializeStruct::end(state)
}
}
|
{
f.write_str(<Self::Value as crate::Resource>::KIND)
}
|
run.js
|
console.log('collapse bot injected');
chrome.runtime.onMessage.addListener(
({keyword, isCollapse, isRegex}) => {
if (!keyword) {
return;
}
let domList;
if (isRegex) {
domList = [].slice.call(document.querySelectorAll('.file-header'))
.filter(node => new RegExp(keyword).test(node.dataset.path))
.map(node => node.querySelector(`button[aria-expanded=${isCollapse ? 'true' : 'false'}]`));
} else {
domList = document.querySelectorAll(`.file-header[data-path*="${keyword}"] button[aria-expanded=${isCollapse ? 'true' : 'false'}]`);
}
domList.forEach(dom => {
if (!dom) {
return;
}
dom.click();
      // There seems to be a bug on the GitHub page where aria-expanded is not updated correctly, so update it here
dom.setAttribute('aria-expanded', !isCollapse);
});
// Note: Returning true is required here!
// ref: http://stackoverflow.com/questions/20077487/chrome-extension-message-passing-response-not-sent
return true;
}
|
);
|
|
ui.rs
|
use bevy::prelude::*;
/// This example illustrates the various features of Bevy UI.
fn main()
|
fn setup(
commands: &mut Commands,
asset_server: Res<AssetServer>,
mut materials: ResMut<Assets<ColorMaterial>>,
) {
commands
// ui camera
.spawn(CameraUiBundle::default())
// root node
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Percent(100.0), Val::Percent(100.0)),
justify_content: JustifyContent::SpaceBetween,
..Default::default()
},
material: materials.add(Color::NONE.into()),
..Default::default()
})
.with_children(|parent| {
parent
// left vertical fill (border)
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Px(200.0), Val::Percent(100.0)),
border: Rect::all(Val::Px(2.0)),
..Default::default()
},
material: materials.add(Color::rgb(0.65, 0.65, 0.65).into()),
..Default::default()
})
.with_children(|parent| {
parent
// left vertical fill (content)
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Percent(100.0), Val::Percent(100.0)),
align_items: AlignItems::FlexEnd,
..Default::default()
},
material: materials.add(Color::rgb(0.15, 0.15, 0.15).into()),
..Default::default()
})
.with_children(|parent| {
// text
parent.spawn(TextBundle {
style: Style {
margin: Rect::all(Val::Px(5.0)),
..Default::default()
},
text: Text::with_section(
"Text Example",
TextStyle {
font: asset_server.load("fonts/FiraSans-Bold.ttf"),
font_size: 30.0,
color: Color::WHITE,
},
Default::default(),
),
..Default::default()
});
});
})
// right vertical fill
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Px(200.0), Val::Percent(100.0)),
..Default::default()
},
material: materials.add(Color::rgb(0.15, 0.15, 0.15).into()),
..Default::default()
})
// absolute positioning
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Px(200.0), Val::Px(200.0)),
position_type: PositionType::Absolute,
position: Rect {
left: Val::Px(210.0),
bottom: Val::Px(10.0),
..Default::default()
},
border: Rect::all(Val::Px(20.0)),
..Default::default()
},
material: materials.add(Color::rgb(0.4, 0.4, 1.0).into()),
..Default::default()
})
.with_children(|parent| {
parent.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Percent(100.0), Val::Percent(100.0)),
..Default::default()
},
material: materials.add(Color::rgb(0.8, 0.8, 1.0).into()),
..Default::default()
});
})
// render order test: reddest in the back, whitest in the front (flex center)
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Percent(100.0), Val::Percent(100.0)),
position_type: PositionType::Absolute,
align_items: AlignItems::Center,
justify_content: JustifyContent::Center,
..Default::default()
},
material: materials.add(Color::NONE.into()),
..Default::default()
})
.with_children(|parent| {
parent
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Px(100.0), Val::Px(100.0)),
..Default::default()
},
material: materials.add(Color::rgb(1.0, 0.0, 0.0).into()),
..Default::default()
})
.with_children(|parent| {
parent
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Px(100.0), Val::Px(100.0)),
position_type: PositionType::Absolute,
position: Rect {
left: Val::Px(20.0),
bottom: Val::Px(20.0),
..Default::default()
},
..Default::default()
},
material: materials.add(Color::rgb(1.0, 0.3, 0.3).into()),
..Default::default()
})
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Px(100.0), Val::Px(100.0)),
position_type: PositionType::Absolute,
position: Rect {
left: Val::Px(40.0),
bottom: Val::Px(40.0),
..Default::default()
},
..Default::default()
},
material: materials.add(Color::rgb(1.0, 0.5, 0.5).into()),
..Default::default()
})
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Px(100.0), Val::Px(100.0)),
position_type: PositionType::Absolute,
position: Rect {
left: Val::Px(60.0),
bottom: Val::Px(60.0),
..Default::default()
},
..Default::default()
},
material: materials.add(Color::rgb(1.0, 0.7, 0.7).into()),
..Default::default()
})
// alpha test
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Px(100.0), Val::Px(100.0)),
position_type: PositionType::Absolute,
position: Rect {
left: Val::Px(80.0),
bottom: Val::Px(80.0),
..Default::default()
},
..Default::default()
},
material: materials.add(Color::rgba(1.0, 0.9, 0.9, 0.4).into()),
..Default::default()
});
});
})
// bevy logo (flex center)
.spawn(NodeBundle {
style: Style {
size: Size::new(Val::Percent(100.0), Val::Percent(100.0)),
position_type: PositionType::Absolute,
justify_content: JustifyContent::Center,
align_items: AlignItems::FlexEnd,
..Default::default()
},
material: materials.add(Color::NONE.into()),
..Default::default()
})
.with_children(|parent| {
// bevy logo (image)
parent.spawn(ImageBundle {
style: Style {
size: Size::new(Val::Px(500.0), Val::Auto),
..Default::default()
},
material: materials
.add(asset_server.load("branding/bevy_logo_dark_big.png").into()),
..Default::default()
});
});
});
}
|
{
App::build()
.add_plugins(DefaultPlugins)
.add_startup_system(setup.system())
.run();
}
|
package.disabled.py
|
import hashlib
import itertools
import urllib
import sys
import os
import re
import pipes
import AssetPackager
from bs4 import BeautifulSoup
# Packager!
# Puts CSS and JavaScript files referenced in your html together as one, compressed
# (via YUI compressor) and concatenated, with the help of the AssetPackager.
#
# Having lots of HTTP requests is expensive. In an ideal world your page would have one
# CSS file and one JS file. It's possible to get overaggressive here and include something
# big that is only needed on, say, one of your rarely used pages. There are lots of
# tradeoffs and heuristics we could use based on frequency of requests and likelihood
# of assets being requested at a given time. Packager takes a simple approach: analyze
# the assets that appear on each page, and bucket them according to which ones appear
# together. In the simplest case, a site with one page that references assets A,B,C
# will be able to confidently create a single package (file) containing all 3. If we
# add 12 more pages to the site and they all contain A,B, packager will build one package
# of A,B and one of C since C doesn't *always* appear with A,B. It's naive, but it works.
# Packager is NOT a dependency manager! It's naive and just looks at an HTML tree
# and figures out a reasonable way to bundle things together to reduce requests.
# (A rough sketch of this bucketing idea is included below, after this header comment.)
# Features:
# Preserves original asset order.
# Supports blacklisting of assets with data-nopackage
# Downloads and packages remote assets so you can package your site's base functionality with your js framework
# Compresses all CSS/JS, even if it's included inline
# Known limitations:
# 1. Does not support @import syntax in css
# 2. If your script tags aren't all in one spot in the markup, it's possible that packaging could
# force them all together. This is something to be aware of if you've written scripts that
# expect themselves to come both before and after some other html (or if you're doing some
# sketchy document.writes or something).
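# --- Illustrative sketch (not part of the original Packager plugin) -----------
# A minimal, standalone example of the bucketing heuristic described in the
# header comment above: each asset is grouped by the exact set of pages it
# appears on, so assets that always appear together end up in the same package.
# The `pages` mapping is hypothetical and exists purely for illustration.
def _bucketing_sketch(pages):
    """pages maps a page name to the ordered list of assets it references."""
    order = []  # first-seen order, so packages preserve the original asset order
    for asset_list in pages.values():
        for asset in asset_list:
            if asset not in order:
                order.append(asset)
    groups = {}  # frozenset of pages -> assets appearing on exactly those pages
    for asset in order:
        appears_on = frozenset(p for p, lst in pages.items() if asset in lst)
        groups.setdefault(appears_on, []).append(asset)
    return list(groups.values())
# Example: a site whose home page uses A, B and C while every other page uses
# only A and B would yield two packages: ['A', 'B'] and ['C'].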
|
# sudo pip install beautifulsoup4
## CONFIGURATION ##
# For trying packaging on localhost or debugging, set to True which runs packaging on every build.
# Otherwise set to False to only package on deploy. You may have to clean up autogen files manually:
PACKAGE_LOCALLY_DEBUG = False
INCLUDE_REMOTE_ASSETS = True # whether to fetch and package remotely hosted files
MINIFY_FILENAMES = False # otherwise all package filenames will be a concatenation of the filenames within
COMPRESS_PACKAGES = True
INCLUDE_ORIGINAL_FILENAMES_IN_COMMENTS = True
PACKAGE_CSS = True
PACKAGE_JS = True
AUTOGEN_PREFIX = 'cx_' # file prefix for packaged files
localpath_re = re.compile('^(?!http|\/\/)')
relativedir_re = re.compile('^(\.+\/)+')
shortfilename_re = re.compile('(\.js|\.css)$')
assets = []
inline_assets = set()
def _isLocalFile(path):
return re.match(localpath_re, path)
def _staticPath(site, includeBuild=False):
static = os.path.relpath(site.paths['static'], site.path)
if includeBuild:
static = os.path.join(site.paths['build'], static)
return static
def _withoutStatic(site, url):
return os.path.relpath(url, _staticPath(site))
def _relToStaticBuild(site, url):
return os.path.join(_staticPath(site, includeBuild=True), url)
def _getDir(path):
if os.path.isdir(path):
return path
else:
return os.path.dirname(path)
def _getLinks(soup):
def _isValid(tag):
if tag.name != 'link' and tag.name != 'style' or \
tag.has_attr('data-nopackage'):
return False
if tag.name == 'link':
href = tag.get('href')
if not href or \
'stylesheet' not in tag.get('rel') or \
not (INCLUDE_REMOTE_ASSETS or _isLocalFile(href)):
return False
return True
return soup.find_all(_isValid)
def _getScripts(soup):
def _isValid(tag):
src = tag.get('src')
if tag.name != 'script' or \
tag.has_attr('data-nopackage') or \
not (INCLUDE_REMOTE_ASSETS or not src or _isLocalFile(src)):
return False
return True
return soup.find_all(_isValid)
def _getAssetFrom(tag, site, save=False):
url = tag.get('href') or tag.get('src') or None
if url:
# normalize across subdirectories by removing leading "./" or "../"
url = re.sub(relativedir_re, '', url)
if url.startswith(_staticPath(site)):
# change 'static/js/foo' to '/full/absolute/static/.build/static/js/foo'
url = _relToStaticBuild(site, _withoutStatic(site, url))
else:
extension = 'css' if tag.name == 'style' else 'js'
contents = tag.renderContents()
url = 'inline_%s_%s.%s' % (
extension,
hashlib.md5(contents).hexdigest(),
extension
)
url = _relToStaticBuild(site, url)
if save:
inline_assets.add(url) # for cleanup later
with open(url, 'w') as f:
f.write(contents)
return url
def _replaceHTMLWithPackaged(html, replace_map, path, site):
soup = BeautifulSoup(html)
replaced = []
for tag in _getLinks(soup) + _getScripts(soup):
asset = _getAssetFrom(tag, site)
if asset not in replace_map:
continue
path_to_static = os.path.relpath(_staticPath(site, includeBuild=True), _getDir(path))
new_url = os.path.join(path_to_static, replace_map[asset])
if new_url in replaced:
# remove HTML node; this was already covered by another node with same package
tag.extract()
else:
# replace assets with packaged version, but just once per package
replaced.append(new_url)
# update the actual HTML
if tag.name == 'script':
if not tag.get('src'): # inline scripts
tag.clear()
tag['src'] = urllib.quote(new_url, '/:')
else:
if tag.name == 'style': # inline styles
new_tag = soup.new_tag('link', rel="stylesheet")
tag.replace_with(new_tag)
tag = new_tag
tag['href'] = urllib.quote(new_url, '/:')
return str(soup)
def _getPackagedFilename(path_list):
def shortFileName(path):
return re.sub(shortfilename_re, '', os.path.basename(path))
split = path_list[-1].rsplit('.', 1)
extension = '.' + split[1] if len(split) > 1 else ''
merged_name = '__'.join(map(shortFileName, path_list)) + extension
if MINIFY_FILENAMES:
merged_name = hashlib.md5(merged_name).hexdigest()[:7] + extension
subdir = 'css' if extension.endswith('css') else 'js'
filename = os.path.join(subdir, AUTOGEN_PREFIX + merged_name)
no_local_paths = not filter(lambda p: _isLocalFile(p), path_list)
return filename, no_local_paths
def analyzeAndPackageAssets(site):
sys.stdout.write('Analyzing %d gathered assets across %d pages...' %
(len(list(itertools.chain.from_iterable(assets))), len(assets))
)
sys.stdout.flush()
replace_map = {}
# determine what should be packaged with what
packages = AssetPackager.analyze(assets)
print('done')
for i, package in enumerate(packages):
sys.stdout.write(
'\rPacking analyzed assets into %d packages (%d/%d)' %
(len(packages), i + 1, len(packages))
)
sys.stdout.flush()
packaged_filename, no_local = _getPackagedFilename(package)
if len(package) <= 1 and (no_local or not COMPRESS_PACKAGES):
# it would be silly to compress a remote file and "package it with itself"
# also silly for a local file to be packaged with itself if we won't be compressing it
continue
# Create and save the packaged, minified files
AssetPackager.package(
package,
_relToStaticBuild(site, packaged_filename),
compress = COMPRESS_PACKAGES,
filename_markers_in_comments = INCLUDE_ORIGINAL_FILENAMES_IN_COMMENTS
)
for asset in package:
replace_map[asset] = packaged_filename
sys.stdout.write('\nUpdating HTML sources...')
sys.stdout.flush()
for page in site.pages():
path = page.paths['full-build']
with open(pipes.quote(path), 'r') as f:
html = _replaceHTMLWithPackaged(f.read(), replace_map, path, site)
f.close()
with open(pipes.quote(path), "wb") as f:
f.write(html)
f.close()
for asset in inline_assets:
os.remove(asset) # clean up temp buffers
print('done')
# CACTUS METHODS
def preBuild(site):
# disable symlinking so we don't end up with a mess of files
site.nosymlink = True
def postBuild(site):
if PACKAGE_LOCALLY_DEBUG:
analyzeAndPackageAssets(site)
def preDeploy(site):
if not PACKAGE_LOCALLY_DEBUG:
analyzeAndPackageAssets(site)
def postBuildPage(site, path):
# Skip non html pages
if not path.endswith('.html'):
return
with open(pipes.quote(path), 'r') as f:
soup = BeautifulSoup(f.read())
if PACKAGE_JS:
assets.append(map(lambda x: _getAssetFrom(x, site, save=True), _getScripts(soup)))
if PACKAGE_CSS:
assets.append(map(lambda x: _getAssetFrom(x, site, save=True), _getLinks(soup)))
def postDeploy(site):
# cleanup all static files that aren't used anymore
files = [f.path for f in site.files()]
keys = site.bucket.list(_staticPath(site))
unused = filter(lambda k: k.name not in files, keys)
if len(unused) > 0:
print '\nCleaning up %d unused static files on the server:' % len(unused)
for key in list(unused):
print 'D\t' + _withoutStatic(site, key.name)
site.bucket.delete_keys(unused)
|
## INSTALLATION:
# sudo pip install AssetPackager
|
Statistics_Computation.py
|
"""
Functions to compute various statistics of the data
and their associated errors.
"""
#Compute statistics about the variance of an estimator:
import numpy as np
from scipy import stats
import math
#The following functions are involved in estimating the standard error of the variance.
def Moment(Sample,k) :
"""
    This function computes the kth central moment of the
    Sample (about the sample mean).
"""
Mean = np.mean(Sample)
Moment = 0.0
for i in range(0,len(Sample) ) :
Moment = Moment + (Mean - Sample[i] )**k
return Moment/(len(Sample))
def D4(Sample) :
"""Compute the fourth central moment of a sample
See: https://en.wikipedia.org/wiki/Central_moment
    for the definition"""
M = float( len(Sample) )
D4 = ((M-1)/(M**3))*( (M**2 - 3*M + 3)*Moment(Sample,4) + 3*(2*M -3)*Moment(Sample,2)**2 )
return D4
def VarSE(Sample) :
"""
Returns the standard error on the variance of the sample given.
Formula taken from:
Wonnapinij, Passorn, Patrick F. Chinnery, and David C. Samuels. "Previous estimates of mitochondrial DNA mutation level variance did not account for sampling error: comparing the mtDNA genetic bottleneck in mice and humans." The American Journal of Human Genetics 86.4 (2010): 540-550.
Parameters
-----------
Sample : list
list of values
Returns
-----------
SE : float
Standard error on the variance of a sample
"""
M = float( len(Sample) )
SE = ( (1.0/M)*(D4(Sample) - ( (M-3)/(M-1) )*np.var(Sample)**2 ) )**0.5
return SE
def CV(Sample) :
"""
Computes the coefficient of variation of the values.
Arguments
------------
Sample : list
"""
return np.std(Sample)/np.mean(Sample)
def
|
(Sample) :
"""
Compute the standard error on the coefficient of variation.
"""
try :
part1 = (VarSE(Sample)/( 2.0*(np.var(Sample)**0.5)*np.mean(Sample) ) )**2
part2 = ( (((np.var(Sample))**0.5 )*stats.sem(Sample)/(np.mean(Sample)**2) ) )**2
Coeff_ERR = ( part1 + part2 )**0.5
except :
print("Error encountered")
print("Sample length = " + str(len(Sample)))
        print("Setting error to zero by default")
Coeff_ERR = 0.0
return Coeff_ERR
#Bootstrapping Tools:
#Given a sample we can estimate the standard error on the variance
def Bootstrap_SE(Sample,Redraws) :
Data_Points = len(Sample)
Var = np.var(Sample)
Vars = [ ]
for j in range(0,Redraws) :
Sample2 = [ ]
for i in range(0,(len(Sample))) :
            #Draw a random integer less than the sample size
Entry = int(math.floor( np.random.uniform(0, (len(Sample)), 1) ) )
Sample2.append(Sample[Entry])
New_Var = np.var(Sample2)
Vars.append(New_Var)
Var_SE = stats.sem(Vars)
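    #stats.sem(Vars) is std(Vars)/sqrt(Redraws), so multiplying by sqrt(Redraws)
    #below recovers the standard deviation of the bootstrap variances, i.e. the
    #bootstrap estimate of the standard error of the sample variance.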
return (Redraws**0.5)*Var_SE
def Bootstrap_Mean_SE(Sample,Redraws) :
Data_Points = len(Sample)
Mean = np.mean(Sample)
Means = [ ]
for j in range(0,Redraws) :
Sample2 = [ ]
for i in range(0,(len(Sample))) :
            #Draw a random integer less than the sample size
Entry = int(math.floor( np.random.uniform(0, (len(Sample)), 1) ) )
Sample2.append(Sample[Entry])
New_Mean = np.mean(Sample2)
Means.append(New_Mean)
Mean_SE = stats.sem(Means)
Boot_Mean = np.mean(Means)
return (Redraws**0.5)*Mean_SE
def Get_Spearman_Correlation(Array_1, Array_2, P_Val_Threshold=(10 ** (-4))):
"""
Returns the correlation between two arrays.
If p val > 10^-4 we simply report the correlation as zero
Parameters
--------------
Array_1 : list
Array_2 : list
P_Val_Threshold :float
        Set a threshold for the p-value. If the p value
        is greater than this threshold then we can set
        the correlation to zero (i.e. we are not confident
that the correlation exists).
"""
P_Val_Threshold = 1.1
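    # NOTE: the assignment above overrides the P_Val_Threshold argument, so the
    # p-value thresholding described in the docstring is effectively disabled.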
Correlation, pval = stats.spearmanr(Array_1, Array_2)
if pval < P_Val_Threshold:
return Correlation
else:
return 0.0
def Bootstrap_Correlation_Confid_Int(Sample_1, Sample_2, Redraws=50):
"""
Use the bootstrapping method to estimate the confidence interval on the
Spearman Correlation
    Returns the 95th percentile of the bootstrapped correlation differences by default
(Does p-value threshold want to be an additional argument?)
Parameters
--------------
Sample_1 : list
First sample
Sample_2 : list
Second sample
Redraws : int
        Number of times to sample with replacement from the joint distribution
of sample_1 and sample_2
Returns
-----------
    The 95th percentile of the bootstrapped correlation differences, usable as a
    one-sided 95% confidence bound on the Spearman correlation.
"""
Data_Points = len(Sample_1)
Original_Correlation = Get_Spearman_Correlation(Sample_1, Sample_2, P_Val_Threshold=20.0)
Correlations = []
Differences = []
for j in range(0, Redraws):
Sample2 = []
Redraw_Sample_1 = []
Redraw_Sample_2 = []
# Redraw the samples:
for i in range(0, (len(Sample_1))):
#Redraw pairs of values:
Entry = int(math.floor(np.random.uniform(0, (len(Sample_1)), 1)))
Redraw_Sample_1.append(Sample_1[Entry])
Redraw_Sample_2.append(Sample_2[Entry])
Redrawn_Correlation = Get_Spearman_Correlation(Redraw_Sample_1, Redraw_Sample_2, P_Val_Threshold=20.0)
Correlations.append(Redrawn_Correlation)
Differences.append(Redrawn_Correlation - Original_Correlation)
# sort the list of differences:
Sorted_Differences = np.sort(Differences)
return Sorted_Differences[int(math.ceil(0.95 * len(Differences)))]
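# --- Illustrative usage sketch (not part of the original module) --------------
# A minimal, hedged example of how the Spearman helpers above are meant to be
# combined. The data below is made up purely for illustration.
if __name__ == "__main__":
    x = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
    y = [1.2, 1.9, 3.1, 4.2, 4.8, 6.3]
    rho = Get_Spearman_Correlation(x, y)
    # 95th percentile of (redrawn correlation - original correlation)
    upper_diff = Bootstrap_Correlation_Confid_Int(x, y, Redraws=200)
    print("Spearman correlation:", rho)
    print("Bootstrap 95th-percentile difference:", upper_diff)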
|
CV_Error
|
CP.py
|
#
# Copyright (c) 2009-2016, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
n0 = 50
n1 = 50
display = False
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Stack two 2D finite-difference matrices on top of each other
# and make the last column dense
def StackedFD2D(N0,N1):
|
A = StackedFD2D(n0,n1)
b = El.DistMultiVec()
El.Gaussian( b, 2*n0*n1, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
ctrl = El.LPAffineCtrl_d()
ctrl.mehrotraCtrl.progress = True
ctrl.mehrotraCtrl.solveCtrl.progress = True
startCP = El.mpi.Time()
x = El.CP( A, b, ctrl )
endCP = El.mpi.Time()
if worldRank == 0:
print "CP time:", endCP-startCP, "seconds"
if display:
El.Display( x, "x" )
bTwoNorm = El.Nrm2( b )
bInfNorm = El.MaxNorm( b )
r = El.DistMultiVec()
El.Copy( b, r )
El.Multiply( El.NORMAL, -1., A, x, 1., r )
if display:
El.Display( r, "r" )
rTwoNorm = El.Nrm2( r )
rInfNorm = El.MaxNorm( r )
if worldRank == 0:
print "|| b ||_2 =", bTwoNorm
print "|| b ||_oo =", bInfNorm
print "|| A x - b ||_2 =", rTwoNorm
print "|| A x - b ||_oo =", rInfNorm
startLS = El.mpi.Time()
xLS = El.LeastSquares(A,b)
endLS = El.mpi.Time()
if worldRank == 0:
print "LS time:", endLS-startLS, "seconds"
if display:
El.Display( xLS, "x_{LS}" )
rLS = El.DistMultiVec()
El.Copy( b, rLS )
El.Multiply( El.NORMAL, -1., A, xLS, 1., rLS )
if display:
El.Display( rLS, "A x_{LS} - b" )
rLSTwoNorm = El.Nrm2(rLS)
rLSInfNorm = El.MaxNorm(rLS)
if worldRank == 0:
print "|| A x_{LS} - b ||_2 =", rLSTwoNorm
print "|| A x_{LS} - b ||_oo =", rLSInfNorm
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
|
A = El.DistSparseMatrix()
height = 2*N0*N1
width = N0*N1
A.Resize(height,width)
localHeight = A.LocalHeight()
A.Reserve(6*localHeight)
for sLoc in xrange(localHeight):
s = A.GlobalRow(sLoc)
if s < N0*N1:
x0 = s % N0
x1 = s / N0
A.QueueLocalUpdate( sLoc, s, 11 )
if x0 > 0:
A.QueueLocalUpdate( sLoc, s-1, -1 )
if x0+1 < N0:
A.QueueLocalUpdate( sLoc, s+1, 2 )
if x1 > 0:
A.QueueLocalUpdate( sLoc, s-N0, -3 )
if x1+1 < N1:
A.QueueLocalUpdate( sLoc, s+N0, 4 )
else:
sRel = s-N0*N1
x0 = sRel % N0
x1 = sRel / N0
A.QueueLocalUpdate( sLoc, sRel, -2 )
if x0 > 0:
A.QueueLocalUpdate( sLoc, sRel-1, -1 )
if x0+1 < N0:
A.QueueLocalUpdate( sLoc, sRel+1, -2 )
if x1 > 0:
A.QueueLocalUpdate( sLoc, sRel-N0, -3 )
if x1+1 < N1:
A.QueueLocalUpdate( sLoc, sRel+N0, 3 )
# The dense last column
A.QueueLocalUpdate( sLoc, width-1, -10/height );
A.ProcessQueues()
return A
|
constants.go
|
package assembler_sp
import (
"encoding/binary"
"fmt"
"log"
"strings"
opcode_sp_type "github.com/swamp/opcodes/type"
)
type PackageConstants struct {
constants []*Constant
functions []*Constant
externalFunctions []*Constant
strings []*Constant
resourceNames []*Constant
dynamicMapper *DynamicMemoryMapper
someConstantIDCounter uint
}
func NewPackageConstants() *PackageConstants
|
func (c *PackageConstants) String() string {
s := "\n"
for _, constant := range c.constants {
if constant == nil {
panic("swamp assembler: nil constant")
}
s += fmt.Sprintf("%v\n", constant)
}
return strings.TrimSpace(s)
}
func (c *PackageConstants) Constants() []*Constant {
return c.constants
}
func (c *PackageConstants) Finalize() {
if len(c.resourceNames) == 0 {
return
}
pointerArea := c.dynamicMapper.Allocate(uint(int(opcode_sp_type.Sizeof64BitPointer)*len(c.resourceNames)), uint32(opcode_sp_type.Alignof64BitPointer), "Resource name chunk")
for index, resourceName := range c.resourceNames {
writePosition := pointerArea.Position + SourceDynamicMemoryPos(index*int(opcode_sp_type.Sizeof64BitPointer))
binary.LittleEndian.PutUint64(c.dynamicMapper.memory[writePosition:writePosition+SourceDynamicMemoryPos(opcode_sp_type.Sizeof64BitPointer)], uint64(resourceName.PosRange().Position))
}
var resourceNameChunkOctets [16]byte
binary.LittleEndian.PutUint64(resourceNameChunkOctets[0:8], uint64(pointerArea.Position))
binary.LittleEndian.PutUint64(resourceNameChunkOctets[8:16], uint64(len(c.resourceNames)))
	resourceNameChunkPointer := c.dynamicMapper.WriteAlign(resourceNameChunkOctets[:], 8, "ResourceNameChunk struct (character-pointer-pointer, resourceNameCount)")
c.constants = append(c.constants, &Constant{
constantType: ConstantTypeResourceNameChunk,
str: "",
source: resourceNameChunkPointer,
debugString: "resource name chunk",
resourceNameId: 0,
})
}
func (c *PackageConstants) DynamicMemory() *DynamicMemoryMapper {
return c.dynamicMapper
}
func (c *PackageConstants) AllocateStringOctets(s string) SourceDynamicMemoryPosRange {
stringOctets := []byte(s)
stringOctets = append(stringOctets, byte(0))
stringOctetsPointer := c.dynamicMapper.Write(stringOctets, "string:"+s)
return stringOctetsPointer
}
const SizeofSwampString = 16
func (c *PackageConstants) AllocateStringConstant(s string) *Constant {
for _, constant := range c.strings {
if constant.str == s {
return constant
}
}
stringOctetsPointer := c.AllocateStringOctets(s)
var swampStringOctets [SizeofSwampString]byte
binary.LittleEndian.PutUint64(swampStringOctets[0:8], uint64(stringOctetsPointer.Position))
binary.LittleEndian.PutUint64(swampStringOctets[8:16], uint64(len(s)))
swampStringPointer := c.dynamicMapper.Write(swampStringOctets[:], "SwampString struct (character-pointer, characterCount) for:"+s)
newConstant := NewStringConstant("string", s, swampStringPointer)
c.constants = append(c.constants, newConstant)
c.strings = append(c.strings, newConstant)
return newConstant
}
func (c *PackageConstants) AllocateResourceNameConstant(s string) *Constant {
for _, resourceNameConstant := range c.resourceNames {
if resourceNameConstant.str == s {
return resourceNameConstant
}
}
stringOctetsPointer := c.AllocateStringOctets(s)
newConstant := NewResourceNameConstant(c.someConstantIDCounter, s, stringOctetsPointer)
c.someConstantIDCounter++
c.constants = append(c.constants, newConstant)
c.resourceNames = append(c.resourceNames, newConstant)
return newConstant
}
const (
SizeofSwampFunc = 11 * 8
SizeofSwampExternalFunc = 18 * 8
SizeofSwampDebugInfoLines = 2 * 8
SizeofSwampDebugInfoFiles = 2 * 8
)
func (c *PackageConstants) AllocateFunctionStruct(uniqueFullyQualifiedFunctionName string,
opcodesPointer SourceDynamicMemoryPosRange, returnOctetSize opcode_sp_type.MemorySize,
returnAlignSize opcode_sp_type.MemoryAlign, parameterCount uint, parameterOctetSize opcode_sp_type.MemorySize, typeIndex uint) (*Constant, error) {
var swampFuncStruct [SizeofSwampFunc]byte
fullyQualifiedStringPointer := c.AllocateStringOctets(uniqueFullyQualifiedFunctionName)
binary.LittleEndian.PutUint32(swampFuncStruct[0:4], uint32(0))
binary.LittleEndian.PutUint64(swampFuncStruct[8:16], uint64(parameterCount)) // parameterCount
binary.LittleEndian.PutUint64(swampFuncStruct[16:24], uint64(parameterOctetSize)) // parameters octet size
binary.LittleEndian.PutUint64(swampFuncStruct[24:32], uint64(opcodesPointer.Position))
binary.LittleEndian.PutUint64(swampFuncStruct[32:40], uint64(opcodesPointer.Size))
binary.LittleEndian.PutUint64(swampFuncStruct[40:48], uint64(returnOctetSize)) // returnOctetSize
binary.LittleEndian.PutUint64(swampFuncStruct[48:56], uint64(returnAlignSize)) // returnAlign
binary.LittleEndian.PutUint64(swampFuncStruct[56:64], uint64(fullyQualifiedStringPointer.Position)) // debugName
binary.LittleEndian.PutUint64(swampFuncStruct[64:72], uint64(typeIndex)) // typeIndex
binary.LittleEndian.PutUint64(swampFuncStruct[72:80], uint64(opcodesPointer.Position))
binary.LittleEndian.PutUint64(swampFuncStruct[80:88], uint64(opcodesPointer.Size))
funcPointer := c.dynamicMapper.WriteAlign(swampFuncStruct[:], 8, "function Struct for:"+uniqueFullyQualifiedFunctionName)
newConstant := NewFunctionReferenceConstantWithDebug("fn", uniqueFullyQualifiedFunctionName, funcPointer)
c.constants = append(c.constants, newConstant)
c.functions = append(c.functions, newConstant)
return newConstant, nil
}
func (c *PackageConstants) AllocateExternalFunctionStruct(uniqueFullyQualifiedFunctionName string, returnValue SourceStackPosRange, parameters []SourceStackPosRange) (*Constant, error) {
var swampFuncStruct [SizeofSwampExternalFunc]byte
fullyQualifiedStringPointer := c.AllocateStringOctets(uniqueFullyQualifiedFunctionName)
if len(parameters) == 0 {
		// panic(fmt.Errorf("not allowed to have zero parameters for %v", uniqueFullyQualifiedFunctionName))
}
binary.LittleEndian.PutUint32(swampFuncStruct[0:4], uint32(1)) // external type
binary.LittleEndian.PutUint64(swampFuncStruct[8:16], uint64(len(parameters))) // parameterCount
binary.LittleEndian.PutUint32(swampFuncStruct[16:20], uint32(returnValue.Pos)) // return pos
binary.LittleEndian.PutUint32(swampFuncStruct[20:24], uint32(returnValue.Size)) // return size
for index, param := range parameters {
first := 24 + index*8
firstEnd := first + 8
second := 28 + index*8
secondEnd := second + 8
binary.LittleEndian.PutUint32(swampFuncStruct[first:firstEnd], uint32(param.Pos)) // params pos
binary.LittleEndian.PutUint32(swampFuncStruct[second:secondEnd], uint32(param.Size)) // params size
}
binary.LittleEndian.PutUint64(swampFuncStruct[120:128], uint64(fullyQualifiedStringPointer.Position)) // debugName
funcPointer := c.dynamicMapper.WriteAlign(swampFuncStruct[:], 8, fmt.Sprintf("external function Struct for: '%s' param Count: %d", uniqueFullyQualifiedFunctionName, len(parameters)))
newConstant := NewExternalFunctionReferenceConstantWithDebug("fn", uniqueFullyQualifiedFunctionName, funcPointer)
c.constants = append(c.constants, newConstant)
c.externalFunctions = append(c.externalFunctions, newConstant)
return newConstant, nil
}
func (c *PackageConstants) allocateDebugLinesStruct(count uint, debugLineOctets []byte, uniqueFullyQualifiedFunctionName string) SourceDynamicMemoryPosRange {
var swampFuncStruct [SizeofSwampDebugInfoLines]byte
	debugLinesLinesPointer := c.dynamicMapper.WriteAlign(debugLineOctets, 2, "debug lines lines")
binary.LittleEndian.PutUint32(swampFuncStruct[0:4], uint32(count))
binary.LittleEndian.PutUint64(swampFuncStruct[8:16], uint64(debugLinesLinesPointer.Position))
pointerToDebugLines := c.dynamicMapper.WriteAlign(swampFuncStruct[:], 4, "debug lines lines:"+uniqueFullyQualifiedFunctionName)
return pointerToDebugLines
}
func (c *PackageConstants) AllocateDebugInfoFiles(fileUrls []*FileUrl) (*Constant, error) {
var debugInfoFilesStruct [SizeofSwampDebugInfoFiles]byte
stringPointers := make([]SourceDynamicMemoryPosRange, len(fileUrls))
for index, fileUrl := range fileUrls {
ptr := c.AllocateStringOctets(fileUrl.File)
stringPointers[index] = ptr
}
spaceForArrayWithPointers := make([]byte, 8 * len(stringPointers))
for index, stringPointer := range stringPointers {
binary.LittleEndian.PutUint64(spaceForArrayWithPointers[index*8: index*8+8], uint64(stringPointer.Position))
}
arrayStart := c.dynamicMapper.WriteAlign(spaceForArrayWithPointers, 8, "array with pointers")
binary.LittleEndian.PutUint32(debugInfoFilesStruct[0:4], uint32(len(fileUrls)))
binary.LittleEndian.PutUint64(debugInfoFilesStruct[8:8+8], uint64(arrayStart.Position))
	debugInfoFilesPtr := c.dynamicMapper.WriteAlign(debugInfoFilesStruct[:], 8, "debug info files")
newConstant := NewDebugInfoFilesWithDebug("debug info files", debugInfoFilesPtr)
c.constants = append(c.constants, newConstant)
return newConstant, nil
}
/*
func (c *PackageConstants) AllocateDebugInfoLines(instructions []*opcode_sp.Instruction) (*Constant, error) {
var swampFuncStruct [SizeofSwampDebugInfoLines]byte
binary.LittleEndian.PutUint32(swampFuncStruct[0:4], uint32(0))
funcPointer := c.dynamicMapper.Write(swampFuncStruct[:], "function Struct for:")
newConstant := NewFunctionReferenceConstantWithDebug("fn", "uniqueFullyQualifiedFunctionName", funcPointer)
c.constants = append(c.constants, newConstant)
return newConstant, nil
}
*/
const SwampFuncOpcodeOffset = 24
const SwampFuncDebugLinesOffset = 72
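// Note (added for clarity): SwampFuncOpcodeOffset corresponds to the byte offset
// at which AllocateFunctionStruct writes the opcodes pointer (swampFuncStruct[24:32]),
// and SwampFuncDebugLinesOffset to the slot at [72:88] that DefineFunctionDebugLines
// later overwrites with the debug-lines struct pointer and size.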
func (c *PackageConstants) FetchOpcodes(functionConstant *Constant) []byte {
readSection := SourceDynamicMemoryPosRange{
Position: SourceDynamicMemoryPos(uint(functionConstant.source.Position + SwampFuncOpcodeOffset)),
Size: DynamicMemoryRange(8 + 8),
}
opcodePointerAndSize := c.dynamicMapper.Read(readSection)
opcodePosition := binary.LittleEndian.Uint64(opcodePointerAndSize[0:8])
opcodeSize := binary.LittleEndian.Uint64(opcodePointerAndSize[8:16])
readOpcodeSection := SourceDynamicMemoryPosRange{
Position: SourceDynamicMemoryPos(opcodePosition),
Size: DynamicMemoryRange(opcodeSize),
}
return c.dynamicMapper.Read(readOpcodeSection)
}
func (c *PackageConstants) AllocatePrepareFunctionConstant(uniqueFullyQualifiedFunctionName string,
returnSize opcode_sp_type.MemorySize, returnAlign opcode_sp_type.MemoryAlign,
parameterCount uint, parameterOctetSize opcode_sp_type.MemorySize, typeId uint) (*Constant, error) {
pointer := SourceDynamicMemoryPosRange{
Position: 0,
Size: 0,
}
return c.AllocateFunctionStruct(uniqueFullyQualifiedFunctionName, pointer, returnSize, returnAlign,
parameterCount, parameterOctetSize, typeId)
}
func (c *PackageConstants) AllocatePrepareExternalFunctionConstant(uniqueFullyQualifiedFunctionName string, returnValue SourceStackPosRange, parameters []SourceStackPosRange) (*Constant, error) {
return c.AllocateExternalFunctionStruct(uniqueFullyQualifiedFunctionName, returnValue, parameters)
}
func (c *PackageConstants) DefineFunctionOpcodes(funcConstant *Constant, opcodes []byte) error {
opcodesPointer := c.dynamicMapper.Write(opcodes, "opcodes for:"+funcConstant.str)
overwritePointer := SourceDynamicMemoryPos(uint(funcConstant.PosRange().Position) + SwampFuncOpcodeOffset)
var opcodePointerOctets [16]byte
binary.LittleEndian.PutUint64(opcodePointerOctets[0:8], uint64(opcodesPointer.Position))
binary.LittleEndian.PutUint64(opcodePointerOctets[8:16], uint64(opcodesPointer.Size))
c.dynamicMapper.Overwrite(overwritePointer, opcodePointerOctets[:], "opcodepointer"+funcConstant.str)
return nil
}
func (c *PackageConstants) DefineFunctionDebugLines(funcConstant *Constant, count uint, debugInfoOctets []byte) error {
overwritePointer := SourceDynamicMemoryPos(uint(funcConstant.PosRange().Position) + SwampFuncDebugLinesOffset)
var debugLineOctets [16]byte
debugLinesStructPointer := c.allocateDebugLinesStruct(count, debugInfoOctets, funcConstant.FunctionReferenceFullyQualifiedName())
binary.LittleEndian.PutUint64(debugLineOctets[0:8], uint64(debugLinesStructPointer.Position))
binary.LittleEndian.PutUint64(debugLineOctets[8:16], uint64(debugLinesStructPointer.Size))
c.dynamicMapper.Overwrite(overwritePointer, debugLineOctets[:], "debugInfoOctets"+funcConstant.str)
return nil
}
func (c *PackageConstants) FindFunction(identifier VariableName) *Constant {
for _, constant := range c.functions {
if constant.str == string(identifier) {
return constant
}
}
return c.FindExternalFunction(identifier)
}
func (c *PackageConstants) FindExternalFunction(identifier VariableName) *Constant {
for _, constant := range c.externalFunctions {
if constant.str == string(identifier) {
return constant
}
}
log.Printf("couldn't find constant external function %v", identifier)
c.DebugOutput()
return nil
}
func (c *PackageConstants) FindStringConstant(s string) *Constant {
for _, constant := range c.strings {
if constant.str == s {
return constant
}
}
return nil
}
func (c *PackageConstants) DebugOutput() {
log.Printf("functions:\n")
for _, function := range c.functions {
log.Printf("%v %v\n", function.str, function.debugString)
}
}
|
{
return &PackageConstants{
dynamicMapper: DynamicMemoryMapperNew(128 * 1024),
}
}
|
Ralph.tsx
|
import React, { Component } from 'react'
export default class Ralph extends Component<any,any> {
render() {
return (
<div className = "ralph">
<h2>{this.props.device_name}(Ralph)</h2>
<h3>Server: Home</h3>
<button style ={this.props.active_status? {backgroundColor:"red"}:{backgroundColor:"green"}}
className = "ralph-activate-button"> {this.props.active_status? "Deactivate":"Activate" }</button>
|
}
}
|
<button className = "ralph-control-button">Control</button>
</div>
)
|
test_itertools.py
|
from test_env import TestEnv
import sys
import unittest
from pythran.typing import List
@TestEnv.module
class TestItertools(TestEnv):
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_imap(self):
self.run_test("def imap_(l0,v): from itertools import imap; return sum(imap(lambda x:x*v, l0))", [0,1,2], 2, imap_=[List[int], int])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_imap_on_generator(self):
self.run_test("def imap_on_generator(l,v): from itertools import imap; return sum(imap(lambda x:x*v, (y for x in l for y in xrange(x))))", [2,3,5], 1, imap_on_generator=[List[int], int])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_imap2(self):
self.run_test("def imap2_(l0, l1,v): from itertools import imap; return sum(imap(lambda x,y:x*v+y, l0, l1))", [0,1,2], [0.,1.1,2.2], 1, imap2_=[List[int], List[float], int])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_imap2_ineq_size(self):
""" Check imap with different size for the two list operand. """
self.run_test("""
def imap2_ineq_size(l0, l1, v):
from itertools import imap
return sum(imap(lambda x, y : x * v + y, l0, l1))""",
[0, 1, 2, 3], [0., 1.1, 2.2], 1,
imap2_ineq_size=[List[int], List[float], int])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_imap2_on_generator(self):
self.run_test("def imap2_on_generator(l0,l1,v): from itertools import imap; return sum(imap(lambda x,y:x*v+y, (z*z for x in l0 for z in xrange(x)), (z*2 for y in l1 for z in xrange(y))))", [0,1,2,3], [3,2,1,0], 2, imap2_on_generator=[List[int], List[int], int])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_imap_none(self):
self.run_test("""
def imap_none(l0):
from itertools import imap
t= 0
for a in imap(None, l0) :
t += a[0]
return t
""", [0,1,2], imap_none=[List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_imap_none2(self):
self.run_test("""
def imap_none2(l0):
from itertools import imap
t=0
for a in imap(None, l0, l0) :
t += sum(a)
return t
""", [0,1,2], imap_none2=[List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_imap_none_on_generators(self):
self.run_test("""
def imap_none_g(l0):
from itertools import imap
t= 0
for a in imap(None, (y for x in l0 for y in xrange(x))) :
t += a[0]
return t
""", [0,1,2], imap_none_g=[List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_imap_none2_on_generators(self):
self.run_test("""
def imap_none2_g(l0):
from itertools import imap
t=0
for a in imap(None, (z for x in l0 for z in xrange(x)), (z for y in l0 for z in xrange(y))) :
t += sum(a)
return t
""", [0,1,2], imap_none2_g=[List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_ifilter_init(self):
self.run_test("def ifilter_init(l0): from itertools import ifilter; return list(ifilter(lambda x: x > 2 , l0))", [0,1,2,3,4,5], ifilter_init=[List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_ifilter_final(self):
self.run_test("def ifilter_final(l0): from itertools import ifilter; return list(ifilter(lambda x: x < 2, l0))", [0,1,2,3,4,5], ifilter_final=[List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def
|
(self):
self.run_test("def ifilterg_(l0): from itertools import ifilter; return list(ifilter(lambda x: (x % 2) == 1, (y for x in l0 for y in xrange(x))))", [0,1,2,3,4,5], ifilterg_=[List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_ifilter_none(self):
self.run_test("""
def ifiltern_(l0):
from itertools import ifilter;
s = 0
for b in (ifilter(None, l0)):
s += 1
return b,s
""", [True,False,True,True], ifiltern_=[List[bool]])
def test_product(self):
self.run_test("def product_(l0,l1): from itertools import product; return sum(map(lambda (x,y) : x*y, product(l0,l1)))", [0,1,2,3,4,5], [10,11], product_=[List[int],List[int]])
def test_product_on_generator(self):
self.run_test("def product_g(l0,l1): from itertools import product; return sum(map(lambda (x,y) : x*y, product((y for x in l0 for y in xrange(x)),(y for x in l1 for y in xrange(x)))))", [0,1,2,3,4], [4,3,2,1,0], product_g=[List[int],List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_itertools(self):
self.run_test("def test_it(l0,l1): import itertools; return sum(itertools.imap(lambda (x,y) : x*y, itertools.product(itertools.ifilter(lambda x : x > 2, l0), itertools.ifilter(lambda x : x < 12, l1))))", [0,1,2,3,4,5], [10,11,12,13,14,15], test_it=[List[int],List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_izip(self):
self.run_test("def izip_(l0,l1): from itertools import izip; return sum(map(lambda (x,y) : x*y, izip(l0,l1)))", [0,1,2], [10,11,12], izip_=[List[int],List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_izip_on_generator(self):
self.run_test("def izipg_(l0,l1): from itertools import izip; return sum(map(lambda (x,y) : x*y, izip((z for x in l0 for z in xrange(x)),(z for x in l1 for z in xrange(x)))))", [0,1,2,3], [3,2,1,0], izipg_=[List[int],List[int]])
def test_islice0(self):
self.run_test("def islice0(l): from itertools import islice ; return [x for x in islice(l, 1,30,3)]", list(range(100)), islice0=[List[int]])
def test_islice1(self):
self.run_test("def islice1(l): from itertools import islice ; return [x for x in islice(l, 16)]", list(range(100)), islice1=[List[int]])
def test_count0(self):
self.run_test("def count0(): from itertools import count ; c = count() ; next(c); next(c); return next(c)", count0=[])
def test_count1(self):
self.run_test("def count1(n): from itertools import count ; c = count(n) ; next(c); next(c); return next(c)", 100, count1=[int])
def test_count2(self):
self.run_test("def count2(n): from itertools import count ; c = count(n,3.2) ; next(c); next(c); return next(c)", 100, count2=[int])
def test_count3(self):
self.run_test("def count3(n):\n from itertools import count\n j = 1\n for i in count(n):\n if i == 10: return j\n else: j +=1", 1, count3=[int])
def test_next_enumerate(self):
self.run_test("def next_enumerate(n): x = enumerate(n) ; next(x) ; return map(None, x)", list(range(5)), next_enumerate=[List[int]])
def test_next_generator(self):
self.run_test("def next_generator(n): x = (i for i in xrange(n) for j in xrange(i)) ; next(x) ; return map(None, x)", 5, next_generator=[int])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_next_imap(self):
self.run_test("def next_imap(n): from itertools import imap ; x = imap(abs,n) ; next(x) ; return map(None, x)", range(-5,5), next_imap=[List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_next_imap_none(self):
self.run_test("def next_imap_none(n): from itertools import imap ; x = imap(None,n) ; next(x) ; return map(None, x)", range(-5,5), next_imap_none=[List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_next_ifilter(self):
self.run_test("def next_ifilter(n): from itertools import ifilter ; x = ifilter(abs,n) ; next(x) ; return map(None, x)", range(-5,5), next_ifilter=[List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_next_ifilter_none(self):
self.run_test("def next_ifilter_none(n): from itertools import ifilter ; x = ifilter(None,n) ; next(x) ; return map(None, x)", range(-5,5), next_ifilter_none=[List[int]])
def test_next_product(self):
self.run_test("def next_product(n): from itertools import product ; x = product(n,n) ; next(x) ; return map(None, x)", list(range(-5,5)), next_product=[List[int]])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_next_izip(self):
self.run_test("def next_izip(n): from itertools import izip ; x = izip(n,n) ; next(x) ; return map(None, x)", range(-5,5), next_izip=[List[int]])
def test_next_islice(self):
self.run_test("def next_islice(n): from itertools import islice ; x = islice(n,8) ; next(x) ; return map(None, x)", list(range(-5,5)), next_islice=[List[int]])
def test_next_count(self):
self.run_test("def next_count(n): from itertools import count ; x = count(n) ; next(x) ; return next(x)", 5, next_count=[int])
def test_iter(self):
self.run_test("def iter_(n): r = iter(range(5,n)) ; next(r) ; return next(r)", 12, iter_=[int])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_ifilter_with_nested_lambdas(self):
code = '''
def ifilter_with_nested_lambdas(N):
perf = lambda n: n == sum(i for i in xrange(1, n) if n % i == 0)
return map(perf, xrange(20))'''
self.run_test(code, 10, ifilter_with_nested_lambdas=[int])
def test_combinations_on_generator(self):
self.run_test("def combinations_g(l0,a): from itertools import combinations; return sum(map(lambda (x,y) : x*y, combinations((y for x in l0 for y in xrange(x)),a)))", [0,1,2], 2, combinations_g=[List[int],int])
def test_next_combinations(self):
self.run_test("def next_combinations(n): from itertools import combinations ; x = combinations(n,2) ; next(x) ; return map(None, x)", list(range(5)), next_combinations=[List[int]])
def test_combinations(self):
self.run_test("def combinations_(l0,a): from itertools import combinations; return sum(map(lambda (x,y) : x*y, combinations(l0,a)))", [0,1,2,3,4,5], 2, combinations_=[List[int],int])
def test_permutations_on_generator(self):
self.run_test("def permutations_g(l0,a): from itertools import permutations; return sum(map(lambda (x,y) : x*y, permutations((y for x in l0 for y in xrange(x)),a)))", [0,1,2], 2, permutations_g=[List[int],int])
def test_next_permutations(self):
self.run_test("def next_permutations(n):"
" from itertools import permutations ;"
" x = permutations(n,2) ;"
" next(x) ;"
" return map(None, x)",
list(range(5)),
next_permutations=[List[int]])
def test_permutations(self):
'''Test permutation without second arg'''
self.run_test("def permutations_2_(l0): "
" from itertools import permutations;"
" return list(permutations(l0))",
[0, 1, 2, 3],
permutations_2_=[List[int]])
def test_permutations_with_prefix(self):
self.run_test("def permutations_(l0,a):"
" from itertools import permutations;"
" return list(permutations(l0,a))",
[0,1,2,3,4,5], 2,
permutations_=[List[int],int])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_imap_over_array(self):
self.run_test("def imap_over_array(l):"
" from itertools import imap ;"
" from numpy import arange ;"
" t = tuple(imap(lambda x: 1, (l,l))) ;"
" return arange(10).reshape((5,2))[t]",
3,
imap_over_array=[int])
@unittest.skipIf(sys.version_info.major == 3, "not supported in pythran3")
def test_imap_over_several_arrays(self):
self.run_test("def imap_over_several_arrays(l):"
" from itertools import imap ;"
" from numpy import arange ;"
" t = tuple(imap(lambda x,y: 1, (l,l), (l, l, l))) ;"
" return arange(10).reshape((5,2))[t]",
3,
imap_over_several_arrays=[int])
def test_itertools_repeat0(self):
code = 'def itertools_repeat0(n): import itertools; return list(itertools.repeat(n, n))'
self.run_test(code, 3, itertools_repeat0=[int])
def test_itertools_repeat1(self):
code = '''
def itertools_repeat1(n):
import itertools
s = []
i = 0
for l in itertools.repeat([n]):
s.append(l)
i += 1
if i < n:
break
return s'''
self.run_test(code, 3, itertools_repeat1=[int])
|
test_ifilter_on_generator
|
question_node.js
|
// Question node
function QuestionNode(questionText, yes, no){
this.text = questionText;
this.yesAnswer = yes;
this.noAnswer = no;
}
QuestionNode.prototype.GetQuestion = function(){
return this.text;
}
QuestionNode.prototype.GetYes = function(){
return this.yesAnswer;
|
return this.noAnswer;
}
|
}
QuestionNode.prototype.GetNo = function(){
|
desired_test.go
|
package serviceaccount
import (
"context"
"testing"
"github.com/giantswarm/apiextensions/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/micrologger/microloggertest"
apiv1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/fake"
)
func Test_Resource_ServiceAccount_GetDesiredState(t *testing.T) {
testCases := []struct {
Obj interface{}
ExpectedName string
}{
{
Obj: &v1alpha1.KVMConfig{
Spec: v1alpha1.KVMConfigSpec{
Cluster: v1alpha1.Cluster{
ID: "al9qy",
},
},
},
ExpectedName: "al9qy",
},
{
Obj: &v1alpha1.KVMConfig{
Spec: v1alpha1.KVMConfigSpec{
Cluster: v1alpha1.Cluster{
ID: "my-cluster",
},
},
},
ExpectedName: "my-cluster",
},
}
var err error
var newResource *Resource
{
resourceConfig := DefaultConfig()
resourceConfig.K8sClient = fake.NewSimpleClientset()
resourceConfig.Logger = microloggertest.New()
newResource, err = New(resourceConfig)
if err != nil {
t.Fatal("expected", nil, "got", err)
}
}
for i, tc := range testCases {
result, err := newResource.GetDesiredState(context.TODO(), tc.Obj)
if err != nil {
t.Fatal("case", i+1, "expected", nil, "got", err)
|
if tc.ExpectedName != name {
t.Fatalf("case %d expected %#v got %#v", i+1, tc.ExpectedName, name)
}
}
}
|
}
name := result.(*apiv1.ServiceAccount).Name
|
duolingo_loader.py
|
import pendulum
import requests
from github_poster.loader.base_loader import BaseLoader
from github_poster.loader.config import DUOLINGO_CALENDAR_API
class DuolingoLoader(BaseLoader):
unit = "XP"
def __init__(self, from_year, to_year, **kwargs):
super().__init__(from_year, to_year)
self.user_name = kwargs.get("user_name", "")
@classmethod
def add_loader_arguments(cls, parser):
parser.add_argument(
"--user_name",
dest="user_name",
type=str,
help="",
required=True,
)
def
|
(self):
month_list = self.make_month_list()
data_list = []
for m in month_list:
r = requests.get(
DUOLINGO_CALENDAR_API.format(
user_id=self.user_name,
start_date=m.to_date_string(),
end_date=m.end_of("month").to_date_string(),
)
)
if not r.ok:
print(f"get duolingo calendar api failed {str(r.text)}")
try:
data_list.extend(r.json()["summaries"])
except Exception:
# just pass for now
pass
return data_list
def make_track_dict(self):
data_list = self.get_api_data()
for d in data_list:
date_str = pendulum.from_timestamp(d["date"]).to_date_string()
number = d["gainedXp"]
if number:
self.number_by_date_dict[date_str] = number
self.number_list.append(number)
def get_all_track_data(self):
self.make_track_dict()
self.make_special_number()
return self.number_by_date_dict, self.year_list
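# Illustrative usage sketch (assumption, not part of this module): a caller would
# construct the loader and ask for the aggregated data, e.g.
#   loader = DuolingoLoader(2020, 2021, user_name="some_duolingo_user")
#   number_by_date, years = loader.get_all_track_data()
# where number_by_date maps "YYYY-MM-DD" strings to the XP gained on that day.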
|
get_api_data
|
interpreter.rs
|
use std::path::Path;
use errors::*;
use file;
use Target;
/// Checks if the interpreters have been registered in the host system
pub fn is_registered(target: &Target) -> Result<bool> {
if file::read("/proc/sys/fs/binfmt_misc/status")?.trim() != "enabled" {
Err("host system doesn't have binfmt_misc support")?
}
let ok = if target.is_windows() {
let wine = Path::new("/proc/sys/fs/binfmt_misc/wine");
wine.exists() &&
{
let f = file::read(wine)?;
f.contains("/usr/bin/run-detectors") ||
|
f.contains("/usr/lib/binfmt-support/run-detectors")
}
} else {
// NOTE checking any architecture will do, here we pick arm
let qemu = Path::new("/proc/sys/fs/binfmt_misc/qemu-arm");
qemu.exists() && file::read(qemu)?.contains("/usr/bin/qemu-arm-static")
};
Ok(ok)
}
| |
application.root.js
|
class ApplicationRoot{
|
this.mediaService = mediaService;
}
}
export default {
controller: ApplicationRoot,
templateUrl: 'application-root/application.root.tpl.html'
};
|
// @ngInject
constructor($http, mediaService) {
this.$http = $http;
|
test_configadmin.py
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the ConfigurationAdmin shell commands
:author: Thomas Calmant
"""
# Pelix
import pelix.framework
import pelix.services
import pelix.shell
import pelix.shell.beans as beans
# Standard library
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import unittest2 as unittest
except ImportError:
import unittest
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
class ConfigAdminShellTest(unittest.TestCase):
"""
Tests the ConfigurationAdmin shell commands
"""
def setUp(self):
"""
Prepares a framework and retrieves the shell and ConfigurationAdmin services
"""
# Use a local configuration folder
conf_folder = os.path.join(os.path.dirname(__file__), "conf")
# Create the framework
self.framework = pelix.framework.create_framework(
('pelix.ipopo.core', 'pelix.shell.core',
'pelix.services.configadmin', 'pelix.shell.configadmin'),
{'configuration.folder': conf_folder})
self.framework.start()
# Get the Shell service
context = self.framework.get_bundle_context()
svc_ref = context.get_service_reference(pelix.shell.SERVICE_SHELL)
self.shell = context.get_service(svc_ref)
# Get the ConfigurationAdmin service
context = self.framework.get_bundle_context()
self.config_ref = context.get_service_reference(
pelix.services.SERVICE_CONFIGURATION_ADMIN)
self.config = context.get_service(self.config_ref)
# Remove existing configurations
for config in self.config.list_configurations():
config.delete()
def _run_command(self, command, *args):
"""
Runs the given shell command
"""
# String output
str_output = StringIO()
|
if args:
command = command.format(*args)
# Add the namespace prefix
command = 'config.{0}'.format(command)
# Run command
session = beans.ShellSession(beans.IOHandler(None, str_output))
self.shell.execute(command, session)
return str_output.getvalue()
def tearDown(self):
"""
Cleans up for next test
"""
# Remove existing configurations
for config in self.config.list_configurations():
config.delete()
# Stop the framework
pelix.framework.FrameworkFactory.delete_framework()
self.framework = None
def testLifeCycle(self):
"""
Tests a configuration life cycle
"""
# Create a factory configuration
key = "testConfig"
first_value = "first"
factory_name = "testFactory"
output = self._run_command("create {0} {1}={2}", factory_name,
key, first_value)
# Get the generated configuration
config = next(iter(self.config.list_configurations()))
# Check validity
self.assertIn(config.get_pid(), output)
self.assertEqual(factory_name, config.get_factory_pid())
self.assertDictContainsSubset({key: first_value},
config.get_properties())
# Update it
second_value = "second"
self._run_command("update {0} {1}={2}", config.get_pid(),
key, second_value)
self.assertDictContainsSubset({key: second_value},
config.get_properties())
# Reload it
self._run_command("reload {0}", config.get_pid())
# List it
output = self._run_command('list')
self.assertIn(config.get_pid(), output)
output = self._run_command('list {0}', config.get_pid())
self.assertIn(config.get_pid(), output)
# Delete it
self._run_command("delete {0}", config.get_pid())
self.assertEqual(self.config.list_configurations(), set())
def testInvalidPid(self):
"""
Tests commands with invalid PIDs
"""
self._run_command("delete <invalid>")
self._run_command("list <invalid>")
self._run_command("reload <invalid>")
def testUpdate(self):
"""
Tests the update command
"""
pid = "testPid"
key = "testConfig"
value = "testValue"
# Create the configuration, with no property
self._run_command("update {0}", pid)
# Get the generated configuration
config = next(iter(self.config.list_configurations()))
self.assertEqual(config.get_pid(), pid)
self.assertIsNone(config.get_properties())
# Set a key
self._run_command("update {0} {1}={2}", pid, key, value)
self.assertDictContainsSubset({key: value}, config.get_properties())
# Remove a key
self._run_command("update {0} {1}=None", pid, key)
self.assertNotIn(key, config.get_properties())
def testList(self):
"""
Other tests for the list command
"""
pid = "testPid"
pid2 = "testPidBis"
key = "testConfig"
value = "testValue"
# Nothing at first
output = self._run_command("list")
self.assertIn("No configuration", output)
# List a nonexistent PID
output = self._run_command("list {0}", pid)
self.assertIn("No configuration", output)
# Create a configuration without properties
config = self.config.get_configuration(pid)
# List it
output = self._run_command("list {0}", pid)
self.assertIn("Not yet updated", output)
# Update it
config.update({key: value})
output = self._run_command("list {0}", pid)
self.assertIn(pid, output)
self.assertIn(key, output)
self.assertIn(value, output)
# Create a second one
config2 = self.config.get_configuration(pid2)
# Delete the first one
config.delete()
self.assertNotIn(config, self.config.list_configurations())
self.assertIn(config2, self.config.list_configurations())
# List it
output = self._run_command("list {0}", pid)
self.assertIn("No configuration", output)
self.assertIn(pid, output)
|
# Format command
|
App.js
|
import React, { Component } from 'react';
import './App.css';
import { Route, Switch } from "react-router-dom";
import Layout from '../../Components/Layout/Layout';
import Dashboard from '../Dashboard/Dashboard';
import NetworkDesign from '../Network-design/Network-Design';
import ServerOnboarding from '../Server-onboarding/Server-Onboarding';
import BlockchainOperations from '../Blockchain-operations/create/Blockchain-operations-create';
import Organisations from '../Blockchain-operations/Organisations/Organisations';
import Channels from '../Blockchain-operations/Channels/Channels';
import Terminal from '../Terminal/Terminal';
class
|
extends Component {
state = {
}
render() {
return (
<Layout>
<Switch>
<Route path="/dashboard" component={Dashboard} />
<Route path="/server-onboarding" component={ServerOnboarding} />
<Route path="/network-design" component={NetworkDesign} />
<Route path="/blockchain-operation/create" component={BlockchainOperations} />
<Route path="/blockchain-operation/organisations" component={Organisations} />
<Route path="/blockchain-operation/channels" component={Channels} />
<Route path="/terminal" component={Terminal} />
<Route component={Dashboard} />
</Switch>
</Layout>
);
}
}
export default App;
|
App
|
DescriptionController.js
|
export class
|
{
constructor(Knekt) {
this.knekt = Knekt;
}
plugins(_, res) {
res({success: true, err_code: null, plugins: this.knekt.plugins});
}
}
|
DescriptionController
|
pcr25.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::PCR25 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
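// Illustrative usage sketch (assumption: the owning PORT peripheral block exposes this
// register as `pcr25`): the generated closures above are used for read-modify-write
// access, e.g.
//
//     porta.pcr25.modify(|_, w| w.mux()._001().irqc()._1001());
//     let pin_is_gpio = porta.pcr25.read().mux().is_001();
//
// `modify` preserves the other bits, while `write` starts from the reset value (0).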
#[doc = "Possible values of the field `PS`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PSR {
#[doc = "Internal pulldown resistor is enabled on the corresponding pin, if the corresponding PE field is set."]
_0,
#[doc = "Internal pullup resistor is enabled on the corresponding pin, if the corresponding PE field is set."]
_1,
}
impl PSR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PSR::_0 => false,
PSR::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PSR {
match value {
false => PSR::_0,
true => PSR::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == PSR::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == PSR::_1
}
}
#[doc = "Possible values of the field `PE`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PER {
#[doc = "Internal pullup or pulldown resistor is not enabled on the corresponding pin."]
_0,
#[doc = "Internal pullup or pulldown resistor is enabled on the corresponding pin, if the pin is configured as a digital input."]
_1,
}
impl PER {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PER::_0 => false,
PER::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PER {
match value {
false => PER::_0,
true => PER::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == PER::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == PER::_1
}
}
#[doc = "Possible values of the field `SRE`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SRER {
#[doc = "Fast slew rate is configured on the corresponding pin, if the pin is configured as a digital output."]
_0,
#[doc = "Slow slew rate is configured on the corresponding pin, if the pin is configured as a digital output."]
_1,
}
impl SRER {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
SRER::_0 => false,
SRER::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> SRER {
match value {
false => SRER::_0,
true => SRER::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == SRER::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == SRER::_1
}
}
#[doc = "Possible values of the field `PFE`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PFER {
#[doc = "Passive input filter is disabled on the corresponding pin."]
_0,
#[doc = "Passive input filter is enabled on the corresponding pin, if the pin is configured as a digital input. Refer to the device data sheet for filter characteristics."]
_1,
}
impl PFER {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PFER::_0 => false,
PFER::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PFER {
match value {
false => PFER::_0,
true => PFER::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == PFER::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == PFER::_1
}
}
#[doc = "Possible values of the field `ODE`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ODER {
#[doc = "Open drain output is disabled on the corresponding pin."]
_0,
#[doc = "Open drain output is enabled on the corresponding pin, if the pin is configured as a digital output."]
_1,
}
impl ODER {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ODER::_0 => false,
ODER::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ODER {
match value {
false => ODER::_0,
true => ODER::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ODER::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ODER::_1
}
}
#[doc = "Possible values of the field `DSE`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DSER {
#[doc = "Low drive strength is configured on the corresponding pin, if pin is configured as a digital output."]
_0,
#[doc = "High drive strength is configured on the corresponding pin, if pin is configured as a digital output."]
_1,
}
impl DSER {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
DSER::_0 => false,
DSER::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> DSER {
match value {
false => DSER::_0,
true => DSER::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == DSER::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == DSER::_1
}
}
#[doc = "Possible values of the field `MUX`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MUXR {
#[doc = "Pin disabled (analog)."]
_000,
#[doc = "Alternative 1 (GPIO)."]
_001,
#[doc = "Alternative 2 (chip-specific)."]
_010,
#[doc = "Alternative 3 (chip-specific)."]
_011,
#[doc = "Alternative 4 (chip-specific)."]
_100,
#[doc = "Alternative 5 (chip-specific)."]
_101,
#[doc = "Alternative 6 (chip-specific)."]
_110,
#[doc = "Alternative 7 (chip-specific)."]
_111,
}
impl MUXR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
MUXR::_000 => 0,
MUXR::_001 => 1,
MUXR::_010 => 2,
MUXR::_011 => 3,
MUXR::_100 => 4,
MUXR::_101 => 5,
MUXR::_110 => 6,
MUXR::_111 => 7,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> MUXR {
match value {
0 => MUXR::_000,
1 => MUXR::_001,
2 => MUXR::_010,
3 => MUXR::_011,
4 => MUXR::_100,
5 => MUXR::_101,
6 => MUXR::_110,
7 => MUXR::_111,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `_000`"]
#[inline]
pub fn is_000(&self) -> bool {
*self == MUXR::_000
}
#[doc = "Checks if the value of the field is `_001`"]
#[inline]
pub fn is_001(&self) -> bool {
*self == MUXR::_001
}
#[doc = "Checks if the value of the field is `_010`"]
#[inline]
pub fn is_010(&self) -> bool {
*self == MUXR::_010
}
#[doc = "Checks if the value of the field is `_011`"]
#[inline]
pub fn is_011(&self) -> bool {
*self == MUXR::_011
}
#[doc = "Checks if the value of the field is `_100`"]
#[inline]
pub fn is_100(&self) -> bool {
*self == MUXR::_100
}
#[doc = "Checks if the value of the field is `_101`"]
#[inline]
pub fn is_101(&self) -> bool {
*self == MUXR::_101
}
#[doc = "Checks if the value of the field is `_110`"]
#[inline]
pub fn is_110(&self) -> bool {
*self == MUXR::_110
}
#[doc = "Checks if the value of the field is `_111`"]
#[inline]
pub fn is_111(&self) -> bool {
*self == MUXR::_111
}
}
#[doc = "Possible values of the field `LK`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum LKR {
#[doc = "Pin Control Register fields \\[15:0\\] are not locked."]
_0,
#[doc = "Pin Control Register fields \\[15:0\\] are locked and cannot be updated until the next system reset."]
_1,
}
impl LKR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
LKR::_0 => false,
LKR::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> LKR {
match value {
false => LKR::_0,
true => LKR::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == LKR::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == LKR::_1
}
}
#[doc = "Possible values of the field `IRQC`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IRQCR {
#[doc = "Interrupt Status Flag (ISF) is disabled."]
_0000,
#[doc = "ISF flag and DMA request on rising edge."]
_0001,
#[doc = "ISF flag and DMA request on falling edge."]
_0010,
#[doc = "ISF flag and DMA request on either edge."]
_0011,
#[doc = "ISF flag and Interrupt when logic 0."]
_1000,
#[doc = "ISF flag and Interrupt on rising-edge."]
_1001,
#[doc = "ISF flag and Interrupt on falling-edge."]
_1010,
#[doc = "ISF flag and Interrupt on either edge."]
_1011,
#[doc = "ISF flag and Interrupt when logic 1."]
_1100,
#[doc = r" Reserved"]
_Reserved(u8),
}
impl IRQCR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
IRQCR::_0000 => 0,
IRQCR::_0001 => 1,
IRQCR::_0010 => 2,
IRQCR::_0011 => 3,
IRQCR::_1000 => 8,
IRQCR::_1001 => 9,
IRQCR::_1010 => 10,
IRQCR::_1011 => 11,
IRQCR::_1100 => 12,
IRQCR::_Reserved(bits) => bits,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> IRQCR {
match value {
0 => IRQCR::_0000,
1 => IRQCR::_0001,
2 => IRQCR::_0010,
3 => IRQCR::_0011,
8 => IRQCR::_1000,
9 => IRQCR::_1001,
10 => IRQCR::_1010,
11 => IRQCR::_1011,
12 => IRQCR::_1100,
i => IRQCR::_Reserved(i),
}
}
#[doc = "Checks if the value of the field is `_0000`"]
#[inline]
pub fn is_0000(&self) -> bool {
*self == IRQCR::_0000
}
#[doc = "Checks if the value of the field is `_0001`"]
#[inline]
pub fn is_0001(&self) -> bool {
*self == IRQCR::_0001
}
#[doc = "Checks if the value of the field is `_0010`"]
#[inline]
pub fn is_0010(&self) -> bool {
*self == IRQCR::_0010
}
#[doc = "Checks if the value of the field is `_0011`"]
#[inline]
pub fn is_0011(&self) -> bool {
*self == IRQCR::_0011
}
#[doc = "Checks if the value of the field is `_1000`"]
#[inline]
pub fn is_1000(&self) -> bool {
*self == IRQCR::_1000
}
#[doc = "Checks if the value of the field is `_1001`"]
#[inline]
pub fn is_1001(&self) -> bool {
*self == IRQCR::_1001
}
#[doc = "Checks if the value of the field is `_1010`"]
#[inline]
pub fn is_1010(&self) -> bool {
*self == IRQCR::_1010
}
#[doc = "Checks if the value of the field is `_1011`"]
#[inline]
pub fn is_1011(&self) -> bool {
*self == IRQCR::_1011
}
#[doc = "Checks if the value of the field is `_1100`"]
#[inline]
pub fn is_1100(&self) -> bool {
*self == IRQCR::_1100
}
}
#[doc = "Possible values of the field `ISF`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ISFR {
#[doc = "Configured interrupt is not detected."]
_0,
#[doc = "Configured interrupt is detected. If the pin is configured to generate a DMA request, then the corresponding flag will be cleared automatically at the completion of the requested DMA transfer. Otherwise, the flag remains set until a logic 1 is written to the flag. If the pin is configured for a level sensitive interrupt and the pin remains asserted, then the flag is set again immediately after it is cleared."]
_1,
}
impl ISFR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ISFR::_0 => false,
ISFR::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ISFR {
match value {
false => ISFR::_0,
true => ISFR::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ISFR::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ISFR::_1
}
}
#[doc = "Values that can be written to the field `MUX`"]
pub enum MUXW {
#[doc = "Pin disabled (analog)."]
_000,
#[doc = "Alternative 1 (GPIO)."]
_001,
#[doc = "Alternative 2 (chip-specific)."]
_010,
#[doc = "Alternative 3 (chip-specific)."]
_011,
#[doc = "Alternative 4 (chip-specific)."]
_100,
#[doc = "Alternative 5 (chip-specific)."]
_101,
#[doc = "Alternative 6 (chip-specific)."]
_110,
#[doc = "Alternative 7 (chip-specific)."]
_111,
}
impl MUXW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
MUXW::_000 => 0,
MUXW::_001 => 1,
MUXW::_010 => 2,
MUXW::_011 => 3,
MUXW::_100 => 4,
MUXW::_101 => 5,
MUXW::_110 => 6,
MUXW::_111 => 7,
}
}
}
#[doc = r" Proxy"]
pub struct _MUXW<'a> {
w: &'a mut W,
}
impl<'a> _MUXW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: MUXW) -> &'a mut W {
{
self.bits(variant._bits())
}
}
#[doc = "Pin disabled (analog)."]
#[inline]
pub fn _000(self) -> &'a mut W {
self.variant(MUXW::_000)
}
#[doc = "Alternative 1 (GPIO)."]
#[inline]
pub fn _001(self) -> &'a mut W {
self.variant(MUXW::_001)
}
#[doc = "Alternative 2 (chip-specific)."]
#[inline]
pub fn _010(self) -> &'a mut W {
self.variant(MUXW::_010)
}
#[doc = "Alternative 3 (chip-specific)."]
#[inline]
pub fn _011(self) -> &'a mut W {
self.variant(MUXW::_011)
}
#[doc = "Alternative 4 (chip-specific)."]
#[inline]
pub fn _100(self) -> &'a mut W {
self.variant(MUXW::_100)
}
#[doc = "Alternative 5 (chip-specific)."]
#[inline]
pub fn _101(self) -> &'a mut W {
self.variant(MUXW::_101)
}
#[doc = "Alternative 6 (chip-specific)."]
#[inline]
pub fn _110(self) -> &'a mut W {
self.variant(MUXW::_110)
}
#[doc = "Alternative 7 (chip-specific)."]
#[inline]
pub fn _111(self) -> &'a mut W {
self.variant(MUXW::_111)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 7;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `LK`"]
pub enum LKW {
#[doc = "Pin Control Register fields \\[15:0\\] are not locked."]
_0,
#[doc = "Pin Control Register fields \\[15:0\\] are locked and cannot be updated until the next system reset."]
_1,
}
impl LKW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
LKW::_0 => false,
LKW::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _LKW<'a> {
w: &'a mut W,
}
impl<'a> _LKW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: LKW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Pin Control Register fields \\[15:0\\] are not locked."]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(LKW::_0)
}
#[doc = "Pin Control Register fields \\[15:0\\] are locked and cannot be updated until the next system reset."]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(LKW::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 15;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `IRQC`"]
pub enum IRQCW {
#[doc = "Interrupt Status Flag (ISF) is disabled."]
_0000,
#[doc = "ISF flag and DMA request on rising edge."]
_0001,
#[doc = "ISF flag and DMA request on falling edge."]
_0010,
#[doc = "ISF flag and DMA request on either edge."]
_0011,
#[doc = "ISF flag and Interrupt when logic 0."]
_1000,
#[doc = "ISF flag and Interrupt on rising-edge."]
_1001,
#[doc = "ISF flag and Interrupt on falling-edge."]
_1010,
#[doc = "ISF flag and Interrupt on either edge."]
_1011,
#[doc = "ISF flag and Interrupt when logic 1."]
_1100,
}
impl IRQCW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
IRQCW::_0000 => 0,
IRQCW::_0001 => 1,
IRQCW::_0010 => 2,
IRQCW::_0011 => 3,
IRQCW::_1000 => 8,
IRQCW::_1001 => 9,
IRQCW::_1010 => 10,
IRQCW::_1011 => 11,
IRQCW::_1100 => 12,
}
}
}
#[doc = r" Proxy"]
pub struct _IRQCW<'a> {
w: &'a mut W,
}
impl<'a> _IRQCW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: IRQCW) -> &'a mut W {
unsafe { self.bits(variant._bits()) }
}
#[doc = "Interrupt Status Flag (ISF) is disabled."]
#[inline]
pub fn _0000(self) -> &'a mut W {
self.variant(IRQCW::_0000)
}
#[doc = "ISF flag and DMA request on rising edge."]
#[inline]
pub fn _0001(self) -> &'a mut W {
self.variant(IRQCW::_0001)
}
#[doc = "ISF flag and DMA request on falling edge."]
#[inline]
pub fn _0010(self) -> &'a mut W {
self.variant(IRQCW::_0010)
}
#[doc = "ISF flag and DMA request on either edge."]
#[inline]
pub fn _0011(self) -> &'a mut W {
self.variant(IRQCW::_0011)
}
#[doc = "ISF flag and Interrupt when logic 0."]
#[inline]
pub fn _1000(self) -> &'a mut W {
self.variant(IRQCW::_1000)
}
#[doc = "ISF flag and Interrupt on rising-edge."]
#[inline]
pub fn _1001(self) -> &'a mut W {
self.variant(IRQCW::_1001)
}
#[doc = "ISF flag and Interrupt on falling-edge."]
#[inline]
pub fn _1010(self) -> &'a mut W {
self.variant(IRQCW::_1010)
}
#[doc = "ISF flag and Interrupt on either edge."]
#[inline]
pub fn _1011(self) -> &'a mut W {
self.variant(IRQCW::_1011)
}
#[doc = "ISF flag and Interrupt when logic 1."]
#[inline]
pub fn _1100(self) -> &'a mut W {
self.variant(IRQCW::_1100)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 15;
const OFFSET: u8 = 16;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ISF`"]
pub enum ISFW {
#[doc = "Configured interrupt is not detected."]
_0,
#[doc = "Configured interrupt is detected. If the pin is configured to generate a DMA request, then the corresponding flag will be cleared automatically at the completion of the requested DMA transfer. Otherwise, the flag remains set until a logic 1 is written to the flag. If the pin is configured for a level sensitive interrupt and the pin remains asserted, then the flag is set again immediately after it is cleared."]
_1,
}
impl ISFW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ISFW::_0 => false,
ISFW::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ISFW<'a> {
w: &'a mut W,
}
impl<'a> _ISFW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ISFW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Configured interrupt is not detected."]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ISFW::_0)
}
#[doc = "Configured interrupt is detected. If the pin is configured to generate a DMA request, then the corresponding flag will be cleared automatically at the completion of the requested DMA transfer. Otherwise, the flag remains set until a logic 1 is written to the flag. If the pin is configured for a level sensitive interrupt and the pin remains asserted, then the flag is set again immediately after it is cleared."]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ISFW::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
|
const MASK: bool = true;
const OFFSET: u8 = 24;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 0 - Pull Select"]
#[inline]
pub fn ps(&self) -> PSR {
PSR::_from({
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 1 - Pull Enable"]
#[inline]
pub fn pe(&self) -> PER {
PER::_from({
const MASK: bool = true;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 2 - Slew Rate Enable"]
#[inline]
pub fn sre(&self) -> SRER {
SRER::_from({
const MASK: bool = true;
const OFFSET: u8 = 2;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 4 - Passive Filter Enable"]
#[inline]
pub fn pfe(&self) -> PFER {
PFER::_from({
const MASK: bool = true;
const OFFSET: u8 = 4;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 5 - Open Drain Enable"]
#[inline]
pub fn ode(&self) -> ODER {
ODER::_from({
const MASK: bool = true;
const OFFSET: u8 = 5;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 6 - Drive Strength Enable"]
#[inline]
pub fn dse(&self) -> DSER {
DSER::_from({
const MASK: bool = true;
const OFFSET: u8 = 6;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bits 8:10 - Pin Mux Control"]
#[inline]
pub fn mux(&self) -> MUXR {
MUXR::_from({
const MASK: u8 = 7;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
#[doc = "Bit 15 - Lock Register"]
#[inline]
pub fn lk(&self) -> LKR {
LKR::_from({
const MASK: bool = true;
const OFFSET: u8 = 15;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bits 16:19 - Interrupt Configuration"]
#[inline]
pub fn irqc(&self) -> IRQCR {
IRQCR::_from({
const MASK: u8 = 15;
const OFFSET: u8 = 16;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
#[doc = "Bit 24 - Interrupt Status Flag"]
#[inline]
pub fn isf(&self) -> ISFR {
ISFR::_from({
const MASK: bool = true;
const OFFSET: u8 = 24;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 8:10 - Pin Mux Control"]
#[inline]
pub fn mux(&mut self) -> _MUXW {
_MUXW { w: self }
}
#[doc = "Bit 15 - Lock Register"]
#[inline]
pub fn lk(&mut self) -> _LKW {
_LKW { w: self }
}
#[doc = "Bits 16:19 - Interrupt Configuration"]
#[inline]
pub fn irqc(&mut self) -> _IRQCW {
_IRQCW { w: self }
}
#[doc = "Bit 24 - Interrupt Status Flag"]
#[inline]
pub fn isf(&mut self) -> _ISFW {
_ISFW { w: self }
}
}
|
pub fn bit(self, value: bool) -> &'a mut W {
|
gcc.py
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join, basename, splitext
from workspace_tools.toolchains import mbedToolchain
from workspace_tools.settings import GCC_ARM_PATH, GCC_CR_PATH, GCC_CS_PATH, CW_EWL_PATH, CW_GCC_PATH
from workspace_tools.settings import GOANNA_PATH
from workspace_tools.hooks import hook_tool
class GCC(mbedToolchain):
LINKER_EXT = '.ld'
LIBRARY_EXT = '.a'
STD_LIB_NAME = "lib%s.a"
CIRCULAR_DEPENDENCIES = True
DIAGNOSTIC_PATTERN = re.compile(r'((?P<line>\d+):)(\d+:)? (?P<severity>warning|error): (?P<message>.+)')
def __init__(self, target, options=None, notify=None, macros=None, silent=False, tool_path="", extra_verbose=False):
mbedToolchain.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
if target.core == "Cortex-M0+":
cpu = "cortex-m0plus"
elif target.core == "Cortex-M4F":
cpu = "cortex-m4"
elif target.core == "Cortex-M7F":
cpu = "cortex-m7"
else:
cpu = target.core.lower()
self.cpu = ["-mcpu=%s" % cpu]
if target.core.startswith("Cortex"):
self.cpu.append("-mthumb")
if target.core == "Cortex-M4F":
self.cpu.append("-mfpu=fpv4-sp-d16")
self.cpu.append("-mfloat-abi=softfp")
elif target.core == "Cortex-M7F":
self.cpu.append("-mfpu=fpv5-d16")
self.cpu.append("-mfloat-abi=softfp")
if target.core == "Cortex-A9":
self.cpu.append("-mthumb-interwork")
self.cpu.append("-marm")
self.cpu.append("-march=armv7-a")
self.cpu.append("-mfpu=vfpv3")
self.cpu.append("-mfloat-abi=hard")
self.cpu.append("-mno-unaligned-access")
# Note: We are using "-O2" instead of "-Os" to avoid this known GCC bug:
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46762
common_flags = ["-c", "-Wall", "-Wextra",
"-Wno-unused-parameter", "-Wno-missing-field-initializers",
"-fmessage-length=0", "-fno-exceptions", "-fno-builtin",
"-ffunction-sections", "-fdata-sections",
"-MMD", "-fno-delete-null-pointer-checks", "-fomit-frame-pointer"
] + self.cpu
if "save-asm" in self.options:
common_flags.append("-save-temps")
if "debug-info" in self.options:
common_flags.append("-g")
common_flags.append("-O0")
else:
common_flags.append("-O2")
main_cc = join(tool_path, "arm-none-eabi-gcc")
main_cppc = join(tool_path, "arm-none-eabi-g++")
self.asm = [main_cc, "-x", "assembler-with-cpp"] + common_flags
if not "analyze" in self.options:
self.cc = [main_cc, "-std=gnu99"] + common_flags
self.cppc =[main_cppc, "-std=gnu++98", "-fno-rtti"] + common_flags
else:
self.cc = [join(GOANNA_PATH, "goannacc"), "--with-cc=" + main_cc.replace('\\', '/'), "-std=gnu99", "--dialect=gnu", '--output-format="%s"' % self.GOANNA_FORMAT] + common_flags
self.cppc= [join(GOANNA_PATH, "goannac++"), "--with-cxx=" + main_cppc.replace('\\', '/'), "-std=gnu++98", "-fno-rtti", "--dialect=gnu", '--output-format="%s"' % self.GOANNA_FORMAT] + common_flags
self.ld = [join(tool_path, "arm-none-eabi-gcc"), "-Wl,--gc-sections", "-Wl,--wrap,main"] + self.cpu
self.sys_libs = ["stdc++", "supc++", "m", "c", "gcc"]
self.ar = join(tool_path, "arm-none-eabi-ar")
self.elf2bin = join(tool_path, "arm-none-eabi-objcopy")
def assemble(self, source, object, includes):
return [self.hook.get_cmdline_assembler(self.asm + ['-D%s' % s for s in self.get_symbols() + self.macros] + ["-I%s" % i for i in includes] + ["-o", object, source])]
def parse_dependencies(self, dep_path):
dependencies = []
for line in open(dep_path).readlines()[1:]:
file = line.replace('\\\n', '').strip()
if file:
# GCC might list more than one dependency on a single line; in that case the
# dependencies are separated by a space. However, a space might also be an actual
# space character inside a dependency path, in which case it is prefixed by a
# backslash.
# Temporarily replace every '\ ' with an unused character ('\a') so that it is not
# interpreted by 'split' below; it is converted back to a space afterwards.
file = file.replace('\\ ', '\a')
if file.find(" ") == -1:
dependencies.append(file.replace('\a', ' '))
else:
dependencies = dependencies + [f.replace('\a', ' ') for f in file.split(" ")]
return dependencies
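# Illustrative sketch (not part of the original toolchain): for a continuation line
# such as " src/main.c inc/my\ header.h \", the logic above strips the trailing
# backslash, temporarily turns the escaped space into '\a', splits on the remaining
# spaces, and restores it, yielding ['src/main.c', 'inc/my header.h'].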
def parse_output(self, output):
# The warning/error notification is multiline
WHERE, WHAT = 0, 1
state, file, message = WHERE, None, None
for line in output.splitlines():
match = self.goanna_parse_line(line)
if match is not None:
self.cc_info(
match.group('severity').lower(),
match.group('file'),
match.group('line'),
match.group('message'),
target_name=self.target.name,
toolchain_name=self.name
)
continue
# Each line should start with the file information: "filepath: ..."
# i should point past the file path ^
# avoid the first column in Windows (C:\)
i = line.find(':', 2)
if i == -1: continue
if state == WHERE:
file = line[:i]
message = line[i+1:].strip() + ' '
state = WHAT
elif state == WHAT:
match = GCC.DIAGNOSTIC_PATTERN.match(line[i+1:])
if match is None:
state = WHERE
continue
self.cc_info(
match.group('severity'),
file, match.group('line'),
message + match.group('message')
)
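# Illustrative sketch (assumed GCC output, not part of the original toolchain): a
# two-line diagnostic such as
#   main.cpp: In function 'int main()':
#   main.cpp:12:5: error: 'foo' was not declared in this scope
# is reported via cc_info('error', 'main.cpp', '12',
# "In function 'int main()': 'foo' was not declared in this scope").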
def archive(self, objects, lib_path):
self.default_cmd([self.ar, "rcs", lib_path] + objects)
def link(self, output, objects, libraries, lib_dirs, mem_map):
libs = []
for l in libraries:
name, _ = splitext(basename(l))
libs.append("-l%s" % name[3:])
libs.extend(["-l%s" % l for l in self.sys_libs])
# NOTE: There is a circular dependency between the mbed library and the clib
# We could define a set of weak symbols to satisfy the clib dependencies in "sys.o",
# but if an application uses only clib symbols and not mbed symbols, then the final
# image is not correctly retargeted
if self.CIRCULAR_DEPENDENCIES:
libs.extend(libs)
self.default_cmd(self.hook.get_cmdline_linker(self.ld + ["-T%s" % mem_map, "-o", output] +
objects + ["-L%s" % L for L in lib_dirs] + libs))
@hook_tool
def binary(self, resources, elf, bin):
self.default_cmd(self.hook.get_cmdline_binary([self.elf2bin, "-O", "binary", elf, bin]))
class GCC_ARM(GCC):
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
GCC.__init__(self, target, options, notify, macros, silent, GCC_ARM_PATH, extra_verbose=extra_verbose)
# Use latest gcc nanolib
self.ld.append("--specs=nano.specs")
if target.name in ["LPC1768", "LPC4088", "LPC4088_DM", "LPC4330", "UBLOX_C027", "LPC2368"]:
self.ld.extend(["-u _printf_float", "-u _scanf_float"])
elif target.name in ["RZ_A1H", "ARCH_MAX", "DISCO_F407VG", "DISCO_F429ZI", "DISCO_F469NI", "NUCLEO_F401RE", "NUCLEO_F410RB", "NUCLEO_F411RE", "NUCLEO_F446RE", "ELMO_F411RE", "MTS_MDOT_F411RE", "MTS_DRAGONFLY_F411RE", "DISCO_F746NG"]:
self.ld.extend(["-u_printf_float", "-u_scanf_float"])
self.sys_libs.append("nosys")
class GCC_CR(GCC):
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
GCC.__init__(self, target, options, notify, macros, silent, GCC_CR_PATH, extra_verbose=extra_verbose)
additional_compiler_flags = [
"-D__NEWLIB__", "-D__CODE_RED", "-D__USE_CMSIS", "-DCPP_USE_HEAP",
]
self.cc += additional_compiler_flags
self.cppc += additional_compiler_flags
# Use latest gcc nanolib
self.ld.append("--specs=nano.specs")
if target.name in ["LPC1768", "LPC4088", "LPC4088_DM", "LPC4330", "UBLOX_C027", "LPC2368"]:
self.ld.extend(["-u _printf_float", "-u _scanf_float"])
self.ld += ["-nostdlib"]
|
GCC.__init__(self, target, options, notify, macros, silent, GCC_CS_PATH, extra_verbose=extra_verbose)
class GCC_CW(GCC):
ARCH_LIB = {
"Cortex-M0+": "armv6-m",
}
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
GCC.__init__(self, target, options, notify, macros, silent, CW_GCC_PATH, extra_verbose=extra_verbose)
class GCC_CW_EWL(GCC_CW):
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
GCC_CW.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
# Compiler
common = [
'-mfloat-abi=soft',
'-nostdinc', '-I%s' % join(CW_EWL_PATH, "EWL_C", "include"),
]
self.cc += common + [
'-include', join(CW_EWL_PATH, "EWL_C", "include", 'lib_c99.prefix')
]
self.cppc += common + [
'-nostdinc++', '-I%s' % join(CW_EWL_PATH, "EWL_C++", "include"),
'-include', join(CW_EWL_PATH, "EWL_C++", "include", 'lib_ewl_c++.prefix')
]
# Linker
self.sys_libs = []
self.CIRCULAR_DEPENDENCIES = False
self.ld = [join(CW_GCC_PATH, "arm-none-eabi-g++"),
"-Xlinker --gc-sections",
"-L%s" % join(CW_EWL_PATH, "lib", GCC_CW.ARCH_LIB[target.core]),
"-n", "-specs=ewl_c++.specs", "-mfloat-abi=soft",
"-Xlinker --undefined=__pformatter_", "-Xlinker --defsym=__pformatter=__pformatter_",
"-Xlinker --undefined=__sformatter", "-Xlinker --defsym=__sformatter=__sformatter",
] + self.cpu
class GCC_CW_NEWLIB(GCC_CW):
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
GCC_CW.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
|
class GCC_CS(GCC):
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
|
permissions.go
|
package permissions
import (
"fmt"
"gopkg.in/macaroon-bakery.v2/bakery"
daemonv1 "github.com/tdex-network/tdex-daemon/api-spec/protobuf/gen/tdex-daemon/v1"
tdexv1 "github.com/tdex-network/tdex-daemon/api-spec/protobuf/gen/tdex/v1"
tdexold "github.com/tdex-network/tdex-protobuf/generated/go/trade"
)
const (
EntityOperator = "operator"
EntityTrade = "trade"
EntityMarket = "market"
EntityPrice = "price"
EntityUnlocker = "unlocker"
EntityWallet = "wallet"
EntityWebhook = "webhook"
EntityTransport = "transport"
)
func Validate() error {
methodsThatNeedsAuth := AllPermissionsByMethod()
publicRoutes := Whitelist()
unhandledMethods := findUnhandledMethods(publicRoutes, methodsThatNeedsAuth)
if len(unhandledMethods) > 0 {
return fmt.Errorf("unhandled permissions for following methods: %v", unhandledMethods)
}
return nil
}
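// Illustrative usage sketch (added, not part of the original file; the caller wiring and the
// logger are assumed): a daemon entrypoint can run this check at startup to fail fast when a
// newly added RPC method has no entry in the permission maps:
//
//	if err := permissions.Validate(); err != nil {
//		log.Fatalf("permission maps are out of date: %v", err)
//	}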
// findUnhandledMethods returns the RPC methods that are included neither in the public routes
// nor in the routes whose invocation requires some kind of auth.
// The purpose of this check is to prevent new RPC methods from being forgotten in the public/auth maps.
func findUnhandledMethods(publicRoutes, methodsThatNeedsAuth map[string][]bakery.Op) []string {
result := make([]string, 0)
allMethods := make([]string, 0)
for _, v := range daemonv1.OperatorService_ServiceDesc.Methods {
allMethods = append(allMethods, fmt.Sprintf("/%s/%s", daemonv1.OperatorService_ServiceDesc.ServiceName, v.MethodName))
}
for _, v := range daemonv1.WalletService_ServiceDesc.Methods {
allMethods = append(allMethods, fmt.Sprintf("/%s/%s", daemonv1.WalletService_ServiceDesc.ServiceName, v.MethodName))
}
for _, v := range daemonv1.WalletUnlockerService_ServiceDesc.Methods {
allMethods = append(allMethods, fmt.Sprintf("/%s/%s", daemonv1.WalletUnlockerService_ServiceDesc.ServiceName, v.MethodName))
}
for _, v := range tdexv1.TradeService_ServiceDesc.Methods {
allMethods = append(allMethods, fmt.Sprintf("/%s/%s", tdexv1.TradeService_ServiceDesc.ServiceName, v.MethodName))
}
for _, v := range tdexv1.TransportService_ServiceDesc.Methods {
allMethods = append(allMethods, fmt.Sprintf("/%s/%s", tdexv1.TransportService_ServiceDesc.ServiceName, v.MethodName))
}
for _, v := range allMethods {
_, ok := publicRoutes[v]
if ok {
continue
}
_, ok = methodsThatNeedsAuth[v]
if ok {
continue
}
result = append(result, v)
}
return result
}
// MarketPermissions returns the permissions of the macaroon market.macaroon.
// This grants access to all actions for the market and price entities.
func MarketPermissions() []bakery.Op {
return []bakery.Op{
{
Entity: EntityMarket,
Action: "read",
},
{
Entity: EntityMarket,
Action: "write",
},
{
Entity: EntityPrice,
Action: "read",
},
{
Entity: EntityPrice,
Action: "write",
},
}
}
// PricePermissions returns the permissions of the macaroon price.macaroon.
// This grants access to all actions for the price entity.
func PricePermissions() []bakery.Op {
return []bakery.Op{
{
Entity: EntityPrice,
Action: "read",
},
{
Entity: EntityPrice,
Action: "write",
},
}
}
// ReadOnlyPermissions returns the permissions of the macaroon readonly.macaroon.
// This grants access to the read action for all entities.
func
|
() []bakery.Op {
return []bakery.Op{
{
Entity: EntityOperator,
Action: "read",
},
{
Entity: EntityMarket,
Action: "read",
},
{
Entity: EntityPrice,
Action: "read",
},
{
Entity: EntityWallet,
Action: "read",
},
{
Entity: EntityWebhook,
Action: "read",
},
}
}
// WalletPermissions returns the permissions of the macaroon wallet.macaroon.
// This grants access to all actions for the wallet entity.
func WalletPermissions() []bakery.Op {
return []bakery.Op{
{
Entity: EntityWallet,
Action: "read",
},
{
Entity: EntityWallet,
Action: "write",
},
}
}
// WebhookPermissions returns the permissions of the macaroon webhook.macaroon.
// This grants access to all actions for the webhook entity.
func WebhookPermissions() []bakery.Op {
return []bakery.Op{
{
Entity: EntityWebhook,
Action: "read",
},
{
Entity: EntityWebhook,
Action: "write",
},
}
}
// AdminPermissions returns the permissions of the macaroon admin.macaroon.
// This grants access to all actions for all entities.
func AdminPermissions() []bakery.Op {
return []bakery.Op{
{
Entity: EntityOperator,
Action: "read",
},
{
Entity: EntityOperator,
Action: "write",
},
{
Entity: EntityMarket,
Action: "read",
},
{
Entity: EntityMarket,
Action: "write",
},
{
Entity: EntityPrice,
Action: "read",
},
{
Entity: EntityPrice,
Action: "write",
},
{
Entity: EntityWebhook,
Action: "read",
},
{
Entity: EntityWebhook,
Action: "write",
},
{
Entity: EntityWallet,
Action: "read",
},
{
Entity: EntityWallet,
Action: "write",
},
}
}
// Whitelist returns the list of all whitelisted methods with the relative
// entity and action.
func Whitelist() map[string][]bakery.Op {
return map[string][]bakery.Op{
fmt.Sprintf("/%s/IsReady", daemonv1.WalletUnlockerService_ServiceDesc.ServiceName): {{
Entity: EntityUnlocker,
Action: "read",
}},
fmt.Sprintf("/%s/GenSeed", daemonv1.WalletUnlockerService_ServiceDesc.ServiceName): {{
Entity: EntityUnlocker,
Action: "read",
}},
fmt.Sprintf("/%s/InitWallet", daemonv1.WalletUnlockerService_ServiceDesc.ServiceName): {{
Entity: EntityUnlocker,
Action: "write",
}},
fmt.Sprintf("/%s/UnlockWallet", daemonv1.WalletUnlockerService_ServiceDesc.ServiceName): {{
Entity: EntityUnlocker,
Action: "write",
}},
fmt.Sprintf("/%s/ChangePassword", daemonv1.WalletUnlockerService_ServiceDesc.ServiceName): {{
Entity: EntityUnlocker,
Action: "write",
}},
fmt.Sprintf("/%s/ListMarkets", tdexv1.TradeService_ServiceDesc.ServiceName): {{
Entity: EntityTrade,
Action: "read",
}},
fmt.Sprintf("/%s/GetMarketBalance", tdexv1.TradeService_ServiceDesc.ServiceName): {{
Entity: EntityTrade,
Action: "read",
}},
fmt.Sprintf("/%s/PreviewTrade", tdexv1.TradeService_ServiceDesc.ServiceName): {{
Entity: EntityTrade,
Action: "read",
}},
fmt.Sprintf("/%s/ProposeTrade", tdexv1.TradeService_ServiceDesc.ServiceName): {{
Entity: EntityTrade,
Action: "write",
}},
fmt.Sprintf("/%s/CompleteTrade", tdexv1.TradeService_ServiceDesc.ServiceName): {{
Entity: EntityTrade,
Action: "write",
}},
fmt.Sprintf("/%v/SupportedContentTypes", tdexv1.TransportService_ServiceDesc.ServiceName): {{
Entity: EntityTransport,
Action: "read",
}},
// Tdex old proto
fmt.Sprintf("/%s/Markets", tdexold.File_trade_proto.Services().Get(0).FullName()): {{
Entity: EntityTrade,
Action: "read",
}},
fmt.Sprintf("/%s/Balances", tdexold.File_trade_proto.Services().Get(0).FullName()): {{
Entity: EntityTrade,
Action: "read",
}},
fmt.Sprintf("/%s/MarketPrice", tdexold.File_trade_proto.Services().Get(0).FullName()): {{
Entity: EntityTrade,
Action: "read",
}},
fmt.Sprintf("/%s/TradePropose", tdexold.File_trade_proto.Services().Get(0).FullName()): {{
Entity: EntityTrade,
Action: "write",
}},
fmt.Sprintf("/%s/ProposeTrade", tdexold.File_trade_proto.Services().Get(0).FullName()): {{
Entity: EntityTrade,
Action: "write",
}},
fmt.Sprintf("/%s/TradeComplete", tdexold.File_trade_proto.Services().Get(0).FullName()): {{
Entity: EntityTrade,
Action: "write",
}},
fmt.Sprintf("/%s/CompleteTrade", tdexold.File_trade_proto.Services().Get(0).FullName()): {{
Entity: EntityTrade,
Action: "write",
}},
}
}
// AllPermissionsByMethod returns a mapping of the RPC server calls to the
// permissions they require.
func AllPermissionsByMethod() map[string][]bakery.Op {
return map[string][]bakery.Op{
fmt.Sprintf("/%s/WalletAddress", daemonv1.WalletService_ServiceDesc.ServiceName): {{
Entity: EntityWallet,
Action: "write",
}},
fmt.Sprintf("/%s/SendToMany", daemonv1.WalletService_ServiceDesc.ServiceName): {{
Entity: EntityWallet,
Action: "write",
}},
fmt.Sprintf("/%s/WalletBalance", daemonv1.WalletService_ServiceDesc.ServiceName): {{
Entity: EntityWallet,
Action: "read",
}},
fmt.Sprintf("/%s/GetInfo", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityOperator,
Action: "read",
}},
fmt.Sprintf("/%s/GetFeeAddress", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/ListFeeAddresses", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/GetFeeBalance", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/ClaimFeeDeposits", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/WithdrawFee", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityOperator,
Action: "write",
}},
fmt.Sprintf("/%s/NewMarket", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/GetMarketInfo", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/GetMarketAddress", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/ListMarketAddresses", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/GetMarketBalance", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/ClaimMarketDeposits", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/OpenMarket", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/CloseMarket", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/DropMarket", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityOperator,
Action: "write",
}},
fmt.Sprintf("/%s/GetMarketCollectedSwapFees", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/WithdrawMarket", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityOperator,
Action: "write",
}},
fmt.Sprintf("/%s/UpdateMarketPercentageFee", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/UpdateMarketFixedFee", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/UpdateMarketPrice", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityPrice,
Action: "write",
}},
fmt.Sprintf("/%s/UpdateMarketStrategy", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/GetFeeFragmenterAddress", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/ListFeeFragmenterAddresses", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/GetFeeFragmenterBalance", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/FeeFragmenterSplitFunds", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/WithdrawFeeFragmenter", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/GetMarketFragmenterAddress", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/ListMarketFragmenterAddresses", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/GetMarketFragmenterBalance", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/MarketFragmenterSplitFunds", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/WithdrawMarketFragmenter", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "write",
}},
fmt.Sprintf("/%s/ListMarkets", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityPrice,
Action: "read",
}},
fmt.Sprintf("/%s/ListTrades", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/ListDeposits", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/ListWithdrawals", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
fmt.Sprintf("/%s/ReloadUtxos", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityOperator,
Action: "write",
}},
fmt.Sprintf("/%s/ListUtxos", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityOperator,
Action: "read",
}},
fmt.Sprintf("/%s/AddWebhook", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityWebhook,
Action: "write",
}},
fmt.Sprintf("/%s/RemoveWebhook", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityWebhook,
Action: "write",
}},
fmt.Sprintf("/%s/ListWebhooks", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityWebhook,
Action: "read",
}},
fmt.Sprintf("/%s/GetMarketReport", daemonv1.OperatorService_ServiceDesc.ServiceName): {{
Entity: EntityMarket,
Action: "read",
}},
"/Transport/SupportedContentTypes": {{
Entity: EntityTransport,
Action: "read",
}},
}
}
|
ReadOnlyPermissions
|
MonthlySyncLogDao.js
|
import { AIRPORT_DATABASE, and } from '@airport/air-control';
import { container, DI } from '@airport/di';
import { MONTHLY_SYNC_LOG_DAO } from '../tokens';
import { BaseMonthlySyncLogDao } from '../generated/baseDaos';
import { Q } from '../generated/qApplication';
export class MonthlySyncLogDao extends BaseMonthlySyncLogDao {
async findAllForDatabase(databaseId, synced, callback) {
let dsl;
const airDb = await container(this).get(AIRPORT_DATABASE);
await airDb.find.sheet({
from: [
dsl = Q.MonthlySyncLog
],
select: [
|
}, 1000, (syncSyncLogRows) => {
callback(syncSyncLogRows);
});
}
async updateSyncStatus(databaseId, repositoryIds, synced) {
let dsl;
await this.db.updateWhere({
update: dsl = Q.MonthlySyncLog,
set: {
synced
},
where: and(dsl.databaseId.equals(databaseId), dsl.repositoryId.in(repositoryIds))
});
}
}
DI.set(MONTHLY_SYNC_LOG_DAO, MonthlySyncLogDao);
//# sourceMappingURL=MonthlySyncLogDao.js.map
|
dsl.repositoryId,
dsl.month
],
where: and(dsl.databaseId.equals(databaseId), dsl.synced.equals(synced))
|
index.ts
|
export { default as ErrorBoundary } from "./ErrorBoundary";
export * from "./ErrorBoundary";
|
||
runtime.go
|
/*---------------------------------------------------------------------------------------------
* Copyright (c) IBAX. All rights reserved.
* See LICENSE in the project root for license information.
*--------------------------------------------------------------------------------------------*/
package script
import (
"encoding/json"
"fmt"
"reflect"
"strconv"
"strings"
"time"
"unsafe"
"github.com/IBAX-io/go-ibax/packages/consts"
"github.com/IBAX-io/go-ibax/packages/converter"
"github.com/IBAX-io/go-ibax/packages/types"
"github.com/pkg/errors"
"github.com/shopspring/decimal"
log "github.com/sirupsen/logrus"
)
const (
statusNormal = iota
statusReturn
statusContinue
statusBreak
// Decimal is the constant string for decimal type
Decimal = `decimal.Decimal`
// Interface is the constant string for interface type
Interface = `interface`
File = `*types.Map`
brackets = `[]`
maxArrayIndex = 1000000
maxMapCount = 100000
maxCallDepth = 1000
memoryLimit = 128 << 20 // 128 MB
MaxErrLen = 150
)
var sysVars = map[string]struct{}{
sysVars_block: {},
sysVars_block_key_id: {},
sysVars_block_time: {},
sysVars_data: {},
sysVars_ecosystem_id: {},
sysVars_key_id: {},
sysVars_account_id: {},
sysVars_node_position: {},
sysVars_parent: {},
sysVars_original_contract: {},
sysVars_sc: {},
sysVars_contract: {},
sysVars_stack: {},
sysVars_this_contract: {},
sysVars_time: {},
sysVars_type: {},
sysVars_txcost: {},
sysVars_txhash: {},
sysVars_guest_key: {},
sysVars_gen_block: {},
sysVars_time_limit: {},
sysVars_pre_block_data_hash: {},
}
var (
ErrMemoryLimit = errors.New("Memory limit exceeded")
// ErrVMTimeLimit is returned when the time limit is exceeded
ErrVMTimeLimit = errors.New(`time limit exceeded`)
)
// VMError represents error of VM
type VMError struct {
Type string `json:"type"`
Error string `json:"error"`
}
type blockStack struct {
Block *CodeBlock
Offset int
}
// ErrInfo stores info about current contract or function
type ErrInfo struct {
Name string
Line uint16
}
// RunTime is needed for the execution of the byte-code
type RunTime struct {
stack []any
blocks []*blockStack
vars []any
extend map[string]any
vm *VM
cost int64
err error
unwrap bool
timeLimit bool
callDepth uint16
mem int64
memVars map[any]int64
errInfo ErrInfo
}
// NewRunTime creates a new RunTime for the virtual machine
func NewRunTime(vm *VM, cost int64) *RunTime {
return &RunTime{
stack: make([]any, 0, 1024),
vm: vm,
cost: cost,
memVars: make(map[any]int64),
}
}
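// Illustrative usage sketch (added; the VM, the compiled CodeBlock and the extend map come from
// the surrounding engine and are assumed here):
//
//	rt := NewRunTime(vm, 10000)
//	ret, err := rt.Run(block, nil, map[string]any{})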
func isSysVar(name string) bool
|
func (rt *RunTime) callFunc(cmd uint16, obj *ObjInfo) (err error) {
var (
count, in int
)
if rt.callDepth >= maxCallDepth {
return fmt.Errorf("max call depth")
}
rt.callDepth++
defer func() {
rt.callDepth--
}()
size := len(rt.stack)
in = obj.getInParams()
if rt.unwrap && cmd == cmdCallVari && size > 1 &&
reflect.TypeOf(rt.stack[size-2]).String() == `[]interface {}` {
count = rt.stack[size-1].(int)
arr := rt.stack[size-2].([]any)
rt.stack = rt.stack[:size-2]
for _, item := range arr {
rt.stack = append(rt.stack, item)
}
rt.stack = append(rt.stack, count-1+len(arr))
size = len(rt.stack)
}
rt.unwrap = false
if cmd == cmdCallVari {
count = rt.stack[size-1].(int)
size--
} else {
count = in
}
if obj.Type == ObjectType_Func {
var imap map[string][]any
if obj.Value.CodeBlock().Info.FuncInfo().Names != nil {
if rt.stack[size-1] != nil {
imap = rt.stack[size-1].(map[string][]any)
}
rt.stack = rt.stack[:size-1]
}
if cmd == cmdCallVari {
parcount := count + 1 - in
if parcount < 0 {
log.WithFields(log.Fields{"type": consts.VMError}).Error(errWrongCountPars.Error())
return errWrongCountPars
}
pars := make([]any, parcount)
shift := size - parcount
for i := parcount; i > 0; i-- {
pars[i-1] = rt.stack[size+i-parcount-1]
}
rt.stack = rt.stack[:shift]
rt.stack = append(rt.stack, pars)
}
finfo := obj.Value.CodeBlock().Info.FuncInfo()
if len(rt.stack) < len(finfo.Params) {
log.WithFields(log.Fields{"type": consts.VMError}).Error(errWrongCountPars.Error())
return errWrongCountPars
}
for i, v := range finfo.Params {
switch v.Kind() {
case reflect.String, reflect.Int64:
if v.Kind() == reflect.Int64 {
rv := reflect.ValueOf(rt.stack[len(rt.stack)-in+i])
switch rv.Kind() {
case reflect.Float64:
val, _ := converter.ValueToInt(rt.stack[len(rt.stack)-in+i])
rt.stack[len(rt.stack)-in+i] = val
}
}
if reflect.TypeOf(rt.stack[len(rt.stack)-in+i]) != v {
log.WithFields(log.Fields{"type": consts.VMError}).Error(eTypeParam)
return fmt.Errorf(eTypeParam, i+1)
}
}
}
if obj.Value.CodeBlock().Info.FuncInfo().Names != nil {
rt.stack = append(rt.stack, imap)
}
_, err = rt.RunCode(obj.Value.CodeBlock())
return
}
var (
stack Stacker
ok bool
result []reflect.Value
limit = 0
finfo = obj.Value.ExtFuncInfo()
foo = reflect.ValueOf(finfo.Func)
pars = make([]reflect.Value, in)
)
if stack, ok = rt.extend[Extend_sc].(Stacker); ok {
if err := stack.AppendStack(finfo.Name); err != nil {
return err
}
}
rt.extend[Extend_rt] = rt
auto := 0
for k := 0; k < in; k++ {
if len(finfo.Auto[k]) > 0 {
auto++
}
}
shift := size - count + auto
if finfo.Variadic {
shift = size - count
count += auto
limit = count - in + 1
}
i := count
for ; i > limit; i-- {
if len(finfo.Auto[count-i]) > 0 {
pars[count-i] = reflect.ValueOf(rt.extend[finfo.Auto[count-i]])
auto--
} else {
pars[count-i] = reflect.ValueOf(rt.stack[size-i+auto])
}
if !pars[count-i].IsValid() {
pars[count-i] = reflect.Zero(reflect.TypeOf(``))
}
}
if i > 0 {
pars[in-1] = reflect.ValueOf(rt.stack[size-i : size])
}
if finfo.Name == `ExecContract` && (pars[2].Kind() != reflect.String || !pars[3].IsValid()) {
return fmt.Errorf(`unknown function %v`, pars[1])
}
if finfo.Variadic {
result = foo.CallSlice(pars)
} else {
result = foo.Call(pars)
}
rt.stack = rt.stack[:shift]
if stack != nil {
stack.PopStack(finfo.Name)
}
for i, iret := range result {
// the first return value of every extend function that makes queries to the DB is its cost
if i == 0 && rt.vm.FuncCallsDB != nil {
if _, ok := rt.vm.FuncCallsDB[finfo.Name]; ok {
cost := iret.Int()
if cost > rt.cost {
rt.cost = 0
rt.vm.logger.Error("paid CPU resource is over")
return fmt.Errorf("paid CPU resource is over")
}
rt.cost -= cost
continue
}
}
if finfo.Results[i].String() == `error` {
if iret.Interface() != nil {
rt.errInfo = ErrInfo{Name: finfo.Name}
return iret.Interface().(error)
}
} else {
rt.stack = append(rt.stack, iret.Interface())
}
}
return
}
func (rt *RunTime) extendFunc(name string) error {
var (
ok bool
f any
)
if f, ok = rt.extend[name]; !ok || reflect.ValueOf(f).Kind().String() != `func` {
return fmt.Errorf(`unknown function %s`, name)
}
size := len(rt.stack)
foo := reflect.ValueOf(f)
count := foo.Type().NumIn()
pars := make([]reflect.Value, count)
for i := count; i > 0; i-- {
pars[count-i] = reflect.ValueOf(rt.stack[size-i])
}
result := foo.Call(pars)
rt.stack = rt.stack[:size-count]
for i, iret := range result {
if foo.Type().Out(i).String() == `error` {
if iret.Interface() != nil {
return iret.Interface().(error)
}
} else {
rt.stack = append(rt.stack, iret.Interface())
}
}
return nil
}
func calcMem(v any) (mem int64) {
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.Bool:
mem = 1
case reflect.Int8, reflect.Uint8:
mem = 1
case reflect.Int16, reflect.Uint16:
mem = 2
case reflect.Int32, reflect.Uint32:
mem = 4
case reflect.Int64, reflect.Uint64, reflect.Int, reflect.Uint:
mem = 8
case reflect.Float32:
mem = 4
case reflect.Float64:
mem = 8
case reflect.String:
mem += int64(rv.Len())
case reflect.Slice, reflect.Array:
mem = 12
for i := 0; i < rv.Len(); i++ {
mem += calcMem(rv.Index(i).Interface())
}
case reflect.Map:
mem = 4
for _, k := range rv.MapKeys() {
mem += calcMem(k.Interface())
mem += calcMem(rv.MapIndex(k).Interface())
}
default:
mem = int64(unsafe.Sizeof(v))
}
return
}
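// Illustrative examples (added), following the switch above:
//	calcMem(int64(1))              // 8
//	calcMem("abc")                 // 3
//	calcMem([]any{int64(1), "ab"}) // 12 + 8 + 2 = 22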
func (rt *RunTime) setExtendVar(k string, v any) {
rt.extend[k] = v
rt.recalcMemExtendVar(k)
}
func (rt *RunTime) recalcMemExtendVar(k string) {
mem := calcMem(rt.extend[k])
rt.mem += mem - rt.memVars[k]
rt.memVars[k] = mem
}
func (rt *RunTime) addVar(v any) {
rt.vars = append(rt.vars, v)
mem := calcMem(v)
rt.memVars[len(rt.vars)-1] = mem
rt.mem += mem
}
func (rt *RunTime) setVar(k int, v any) {
rt.vars[k] = v
rt.recalcMemVar(k)
}
func (rt *RunTime) recalcMemVar(k int) {
mem := calcMem(rt.vars[k])
rt.mem += mem - rt.memVars[k]
rt.memVars[k] = mem
}
func valueToBool(v any) bool {
switch val := v.(type) {
case int:
if val != 0 {
return true
}
case int64:
if val != 0 {
return true
}
case float64:
if val != 0.0 {
return true
}
case bool:
return val
case string:
return len(val) > 0
case []uint8:
return len(val) > 0
case []any:
return val != nil && len(val) > 0
case map[string]any:
return val != nil && len(val) > 0
case map[string]string:
return val != nil && len(val) > 0
case *types.Map:
return val != nil && val.Size() > 0
default:
dec, _ := decimal.NewFromString(fmt.Sprintf(`%v`, val))
return dec.Cmp(decimal.Zero) != 0
}
return false
}
// ValueToFloat converts interface (string, float64 or int64) to float64
func ValueToFloat(v any) (ret float64) {
var err error
switch val := v.(type) {
case float64:
ret = val
case int64:
ret = float64(val)
case string:
ret, err = strconv.ParseFloat(val, 64)
if err != nil {
log.WithFields(log.Fields{"type": consts.ConversionError, "error": err, "value": val}).Error("converting value from string to float")
}
case decimal.Decimal:
ret = val.InexactFloat64()
}
return
}
// ValueToDecimal converts interface (string, float64, Decimal or int64) to Decimal
func ValueToDecimal(v any) (ret decimal.Decimal, err error) {
switch val := v.(type) {
case float64:
ret = decimal.NewFromFloat(val).Floor()
case string:
ret, err = decimal.NewFromString(val)
if err != nil {
log.WithFields(log.Fields{"type": consts.ConversionError, "error": err, "value": val}).Error("converting value from string to decimal")
} else {
ret = ret.Floor()
}
case int64:
ret = decimal.New(val, 0)
default:
ret = val.(decimal.Decimal)
}
return
}
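// Illustrative examples (added): float and string inputs are floored, e.g.
//	ValueToDecimal(float64(3.9)) // 3
//	ValueToDecimal("12.7")       // 12
//	ValueToDecimal(int64(5))     // 5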
// SetCost sets the max cost of the execution.
func (rt *RunTime) SetCost(cost int64) {
rt.cost = cost
}
// Cost return the remain cost of the execution.
func (rt *RunTime) Cost() int64 {
return rt.cost
}
// SetVMError sets error of VM
func SetVMError(eType string, eText any) error {
errText := fmt.Sprintf(`%v`, eText)
if len(errText) > MaxErrLen {
errText = errText[:MaxErrLen] + `...`
}
out, err := json.Marshal(&VMError{Type: eType, Error: errText})
if err != nil {
log.WithFields(log.Fields{"type": consts.JSONMarshallError, "error": err}).Error("marshalling VMError")
out = []byte(`{"type": "panic", "error": "marshalling VMError"}`)
}
return fmt.Errorf(string(out))
}
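// Illustrative example (added): SetVMError("panic", "boom") yields an error whose text is the
// JSON string {"type":"panic","error":"boom"}; error texts longer than MaxErrLen are truncated
// and suffixed with "...".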
func (rt *RunTime) getResultValue(item mapItem) (value any, err error) {
switch item.Type {
case mapConst:
value = item.Value
case mapExtend:
value = rt.extend[item.Value.(string)]
case mapVar:
ivar := item.Value.(*VarInfo)
var i int
for i = len(rt.blocks) - 1; i >= 0; i-- {
if ivar.Owner == rt.blocks[i].Block {
value = rt.vars[rt.blocks[i].Offset+ivar.Obj.Value.Int()]
break
}
}
if i < 0 {
err = fmt.Errorf(eWrongVar, ivar.Obj.Value)
}
case mapMap:
value, err = rt.getResultMap(item.Value.(*types.Map))
case mapArray:
value, err = rt.getResultArray(item.Value.([]mapItem))
}
return
}
func (rt *RunTime) getResultArray(cmd []mapItem) ([]any, error) {
initArr := make([]any, 0)
for _, val := range cmd {
value, err := rt.getResultValue(val)
if err != nil {
return nil, err
}
initArr = append(initArr, value)
}
return initArr, nil
}
func (rt *RunTime) getResultMap(cmd *types.Map) (*types.Map, error) {
initMap := types.NewMap()
for _, key := range cmd.Keys() {
val, _ := cmd.Get(key)
value, err := rt.getResultValue(val.(mapItem))
if err != nil {
return nil, err
}
initMap.Set(key, value)
}
return initMap, nil
}
func isSelfAssignment(dest, value any) bool {
if _, ok := value.([]any); !ok {
if _, ok = value.(*types.Map); !ok {
return false
}
}
if reflect.ValueOf(dest).Pointer() == reflect.ValueOf(value).Pointer() {
return true
}
switch v := value.(type) {
case []any:
for _, item := range v {
if isSelfAssignment(dest, item) {
return true
}
}
case *types.Map:
for _, item := range v.Values() {
if isSelfAssignment(dest, item) {
return true
}
}
}
return false
}
// RunCode executes CodeBlock
func (rt *RunTime) RunCode(block *CodeBlock) (status int, err error) {
var cmd *ByteCode
defer func() {
if r := recover(); r != nil {
err = errors.New(fmt.Sprintf(`%v`, r))
}
if err != nil && !strings.HasPrefix(err.Error(), `{`) {
var curContract, line string
if block.isParentContract() {
stack := block.Parent.Info.ContractInfo()
curContract = stack.Name
}
if stack, ok := rt.extend[Extend_stack].([]any); ok {
curContract = stack[len(stack)-1].(string)
}
line = "]"
if cmd != nil {
line = fmt.Sprintf(":%d]", cmd.Line)
}
if len(rt.errInfo.Name) > 0 && rt.errInfo.Name != `ExecContract` {
err = fmt.Errorf("%s [%s %s%s", err, rt.errInfo.Name, curContract, line)
rt.errInfo.Name = ``
} else {
out := err.Error()
if strings.HasSuffix(out, `]`) {
prev := strings.LastIndexByte(out, ' ')
if strings.HasPrefix(out[prev+1:], curContract+`:`) {
out = out[:prev+1]
} else {
out = out[:len(out)-1] + ` `
}
} else {
out += ` [`
}
err = fmt.Errorf(`%s%s%s`, out, curContract, line)
}
}
}()
top := make([]any, 8)
rt.blocks = append(rt.blocks, &blockStack{Block: block, Offset: len(rt.vars)})
var namemap map[string][]any
if block.Type == ObjectType_Func && block.Info.FuncInfo().Names != nil {
if rt.stack[len(rt.stack)-1] != nil {
namemap = rt.stack[len(rt.stack)-1].(map[string][]any)
}
rt.stack = rt.stack[:len(rt.stack)-1]
}
start := len(rt.stack)
varoff := len(rt.vars)
for vkey, vpar := range block.Vars {
rt.cost--
var value any
if block.Type == ObjectType_Func && vkey < len(block.Info.FuncInfo().Params) {
value = rt.stack[start-len(block.Info.FuncInfo().Params)+vkey]
} else {
value = reflect.New(vpar).Elem().Interface()
if vpar == reflect.TypeOf(&types.Map{}) {
value = types.NewMap()
} else if vpar == reflect.TypeOf([]any{}) {
value = make([]any, 0, len(rt.vars)+1)
}
}
rt.addVar(value)
}
if namemap != nil {
for key, item := range namemap {
params := (*block.Info.FuncInfo().Names)[key]
for i, value := range item {
if params.Variadic && i >= len(params.Params)-1 {
off := varoff + params.Offset[len(params.Params)-1]
rt.setVar(off, append(rt.vars[off].([]any), value))
} else {
rt.setVar(varoff+params.Offset[i], value)
}
}
}
}
if block.Type == ObjectType_Func {
start -= len(block.Info.FuncInfo().Params)
}
var (
assign []*VarInfo
tmpInt int64
tmpDec decimal.Decimal
)
labels := make([]int, 0)
main:
for ci := 0; ci < len(block.Code); ci++ {
rt.cost--
if rt.cost <= 0 {
break
}
if rt.timeLimit {
err = ErrVMTimeLimit
break
}
if rt.mem > memoryLimit {
rt.vm.logger.WithFields(log.Fields{"type": consts.VMError}).Warn(ErrMemoryLimit)
err = ErrMemoryLimit
break
}
cmd = block.Code[ci]
var bin any
size := len(rt.stack)
if size < int(cmd.Cmd>>8) {
rt.vm.logger.WithFields(log.Fields{"type": consts.VMError}).Error("stack is empty")
err = fmt.Errorf(`stack is empty`)
break
}
for i := 1; i <= int(cmd.Cmd>>8); i++ {
top[i-1] = rt.stack[size-i]
}
switch cmd.Cmd {
case cmdPush:
rt.stack = append(rt.stack, cmd.Value)
case cmdPushStr:
rt.stack = append(rt.stack, cmd.Value.(string))
case cmdIf:
if valueToBool(rt.stack[len(rt.stack)-1]) {
status, err = rt.RunCode(cmd.Value.(*CodeBlock))
}
case cmdElse:
if !valueToBool(rt.stack[len(rt.stack)-1]) {
status, err = rt.RunCode(cmd.Value.(*CodeBlock))
}
case cmdWhile:
val := rt.stack[len(rt.stack)-1]
rt.stack = rt.stack[:len(rt.stack)-1]
if valueToBool(val) {
status, err = rt.RunCode(cmd.Value.(*CodeBlock))
newci := labels[len(labels)-1]
labels = labels[:len(labels)-1]
if status == statusContinue {
ci = newci - 1
status = statusNormal
continue
}
if status == statusBreak {
status = statusNormal
break
}
}
case cmdLabel:
labels = append(labels, ci)
case cmdContinue:
status = statusContinue
case cmdBreak:
status = statusBreak
case cmdAssignVar:
assign = cmd.Value.([]*VarInfo)
case cmdAssign:
count := len(assign)
for ivar, item := range assign {
if item.Owner == nil {
if (*item).Obj.Type == ObjectType_ExtVar {
if isSysVar((*item).Obj.Value.String()) {
err = fmt.Errorf(eSysVar, (*item).Obj.Value.String())
rt.vm.logger.WithError(err).Error("modifying system variable")
break main
}
rt.setExtendVar((*item).Obj.Value.String(), rt.stack[len(rt.stack)-count+ivar])
}
} else {
var i int
for i = len(rt.blocks) - 1; i >= 0; i-- {
if item.Owner == rt.blocks[i].Block {
k := rt.blocks[i].Offset + item.Obj.Value.Int()
switch rt.blocks[i].Block.Vars[item.Obj.Value.Int()].String() {
case Decimal:
var v decimal.Decimal
v, err = ValueToDecimal(rt.stack[len(rt.stack)-count+ivar])
if err != nil {
break main
}
rt.setVar(k, v)
default:
rt.setVar(k, rt.stack[len(rt.stack)-count+ivar])
}
break
}
}
}
}
case cmdReturn:
status = statusReturn
case cmdError:
eType := msgError
if cmd.Value.(uint32) == keyWarning {
eType = msgWarning
} else if cmd.Value.(uint32) == keyInfo {
eType = msgInfo
}
err = SetVMError(eType, rt.stack[len(rt.stack)-1])
case cmdFuncName:
ifunc := cmd.Value.(FuncNameCmd)
mapoff := len(rt.stack) - 1 - ifunc.Count
if rt.stack[mapoff] == nil {
rt.stack[mapoff] = make(map[string][]any)
}
params := make([]any, 0, ifunc.Count)
for i := 0; i < ifunc.Count; i++ {
cur := rt.stack[mapoff+1+i]
if i == ifunc.Count-1 && rt.unwrap &&
reflect.TypeOf(cur).String() == `[]interface {}` {
params = append(params, cur.([]any)...)
rt.unwrap = false
} else {
params = append(params, cur)
}
}
rt.stack[mapoff].(map[string][]any)[ifunc.Name] = params
rt.stack = rt.stack[:mapoff+1]
continue
case cmdCallVari, cmdCall:
if cmd.Value.(*ObjInfo).Type == ObjectType_ExtFunc {
finfo := cmd.Value.(*ObjInfo).Value.ExtFuncInfo()
if rt.vm.ExtCost != nil {
cost := rt.vm.ExtCost(finfo.Name)
if cost > rt.cost {
rt.cost = 0
break main
} else if cost == -1 {
rt.cost -= CostCall
} else {
rt.cost -= cost
}
}
} else {
rt.cost -= CostCall
}
err = rt.callFunc(cmd.Cmd, cmd.Value.(*ObjInfo))
case cmdVar:
ivar := cmd.Value.(*VarInfo)
var i int
for i = len(rt.blocks) - 1; i >= 0; i-- {
if ivar.Owner == rt.blocks[i].Block {
rt.stack = append(rt.stack, rt.vars[rt.blocks[i].Offset+ivar.Obj.Value.Int()])
break
}
}
if i < 0 {
rt.vm.logger.WithFields(log.Fields{"var": ivar.Obj.Value}).Error("wrong var")
err = fmt.Errorf(`wrong var %v`, ivar.Obj.Value)
break main
}
case cmdExtend, cmdCallExtend:
if val, ok := rt.extend[cmd.Value.(string)]; ok {
rt.cost -= CostExtend
if cmd.Cmd == cmdCallExtend {
err = rt.extendFunc(cmd.Value.(string))
if err != nil {
rt.vm.logger.WithFields(log.Fields{"error": err, "cmd": cmd.Value.(string)}).Error("executing extended function")
err = fmt.Errorf(`extend function %s %s`, cmd.Value.(string), err.Error())
break main
}
} else {
switch varVal := val.(type) {
case int:
val = int64(varVal)
}
rt.stack = append(rt.stack, val)
}
} else {
rt.vm.logger.WithFields(log.Fields{"cmd": cmd.Value.(string)}).Error("unknown extend identifier")
err = fmt.Errorf(`unknown extend identifier %s`, cmd.Value.(string))
}
case cmdIndex:
rv := reflect.ValueOf(rt.stack[size-2])
itype := reflect.TypeOf(rt.stack[size-2]).String()
switch {
case itype == `*types.Map`:
if reflect.TypeOf(rt.stack[size-1]).String() != `string` {
err = fmt.Errorf(eMapIndex, reflect.TypeOf(rt.stack[size-1]).String())
break
}
v, found := rt.stack[size-2].(*types.Map).Get(rt.stack[size-1].(string))
if found {
rt.stack[size-2] = v
} else {
rt.stack[size-2] = nil
}
rt.stack = rt.stack[:size-1]
case itype[:2] == brackets:
if reflect.TypeOf(rt.stack[size-1]).String() != `int64` {
err = fmt.Errorf(eArrIndex, reflect.TypeOf(rt.stack[size-1]).String())
break
}
v := rv.Index(int(rt.stack[size-1].(int64)))
if v.IsValid() {
rt.stack[size-2] = v.Interface()
} else {
rt.stack[size-2] = nil
}
rt.stack = rt.stack[:size-1]
default:
itype := reflect.TypeOf(rt.stack[size-2]).String()
rt.vm.logger.WithFields(log.Fields{"vm_type": itype}).Error("type does not support indexing")
err = fmt.Errorf(`Type %s doesn't support indexing`, itype)
}
case cmdSetIndex:
itype := reflect.TypeOf(rt.stack[size-3]).String()
indexInfo := cmd.Value.(*IndexInfo)
var indexKey int
if indexInfo.Owner != nil {
for i := len(rt.blocks) - 1; i >= 0; i-- {
if indexInfo.Owner == rt.blocks[i].Block {
indexKey = rt.blocks[i].Offset + indexInfo.VarOffset
break
}
}
}
if isSelfAssignment(rt.stack[size-3], rt.stack[size-1]) {
err = errSelfAssignment
break main
}
switch {
case itype == `*types.Map`:
if rt.stack[size-3].(*types.Map).Size() > maxMapCount {
err = errMaxMapCount
break
}
if reflect.TypeOf(rt.stack[size-2]).String() != `string` {
err = fmt.Errorf(eMapIndex, reflect.TypeOf(rt.stack[size-2]).String())
break
}
rt.stack[size-3].(*types.Map).Set(rt.stack[size-2].(string),
reflect.ValueOf(rt.stack[size-1]).Interface())
rt.stack = rt.stack[:size-2]
case itype[:2] == brackets:
if reflect.TypeOf(rt.stack[size-2]).String() != `int64` {
err = fmt.Errorf(eArrIndex, reflect.TypeOf(rt.stack[size-2]).String())
break
}
ind := rt.stack[size-2].(int64)
if strings.Contains(itype, Interface) {
slice := rt.stack[size-3].([]any)
if int(ind) >= len(slice) {
if ind > maxArrayIndex {
err = errMaxArrayIndex
break
}
slice = append(slice, make([]any, int(ind)-len(slice)+1)...)
indexInfo := cmd.Value.(*IndexInfo)
if indexInfo.Owner == nil { // Extend variable $varname
rt.extend[indexInfo.Extend] = slice
} else {
rt.vars[indexKey] = slice
}
rt.stack[size-3] = slice
}
slice[ind] = rt.stack[size-1]
} else {
slice := rt.stack[size-3].([]map[string]string)
slice[ind] = rt.stack[size-1].(map[string]string)
}
rt.stack = rt.stack[:size-2]
default:
rt.vm.logger.WithFields(log.Fields{"vm_type": itype}).Error("type does not support indexing")
err = fmt.Errorf(`Type %s doesn't support indexing`, itype)
}
if indexInfo.Owner == nil {
rt.recalcMemExtendVar(indexInfo.Extend)
} else {
rt.recalcMemVar(indexKey)
}
case cmdUnwrapArr:
if reflect.TypeOf(rt.stack[size-1]).String() == `[]interface {}` {
rt.unwrap = true
}
case cmdSign:
switch top[0].(type) {
case float64:
rt.stack[size-1] = -top[0].(float64)
default:
rt.stack[size-1] = -top[0].(int64)
}
case cmdNot:
rt.stack[size-1] = !valueToBool(top[0])
case cmdAdd:
switch top[1].(type) {
case string:
switch top[0].(type) {
case string:
bin = top[1].(string) + top[0].(string)
case int64:
if tmpInt, err = converter.ValueToInt(top[1]); err == nil {
bin = tmpInt + top[0].(int64)
}
case float64:
bin = ValueToFloat(top[1]) + top[0].(float64)
default:
err = errUnsupportedType
break main
}
case float64:
switch top[0].(type) {
case string, int64, float64:
bin = top[1].(float64) + ValueToFloat(top[0])
default:
err = errUnsupportedType
break main
}
case int64:
switch top[0].(type) {
case string, int64:
if tmpInt, err = converter.ValueToInt(top[0]); err == nil {
bin = top[1].(int64) + tmpInt
}
case float64:
bin = ValueToFloat(top[1]) + top[0].(float64)
default:
err = errUnsupportedType
break main
}
default:
if reflect.TypeOf(top[1]).String() == Decimal &&
reflect.TypeOf(top[0]).String() == Decimal {
bin = top[1].(decimal.Decimal).Add(top[0].(decimal.Decimal))
} else {
err = errUnsupportedType
break main
}
}
case cmdSub:
switch top[1].(type) {
case string:
switch top[0].(type) {
case int64:
if tmpInt, err = converter.ValueToInt(top[1]); err == nil {
bin = tmpInt - top[0].(int64)
}
case float64:
bin = ValueToFloat(top[1]) - top[0].(float64)
default:
err = errUnsupportedType
break main
}
case float64:
switch top[0].(type) {
case string, int64, float64:
bin = top[1].(float64) - ValueToFloat(top[0])
default:
err = errUnsupportedType
break main
}
case int64:
switch top[0].(type) {
case int64, string:
if tmpInt, err = converter.ValueToInt(top[0]); err == nil {
bin = top[1].(int64) - tmpInt
}
case float64:
bin = ValueToFloat(top[1]) - top[0].(float64)
default:
err = errUnsupportedType
break main
}
default:
if reflect.TypeOf(top[1]).String() == Decimal &&
reflect.TypeOf(top[0]).String() == Decimal {
bin = top[1].(decimal.Decimal).Sub(top[0].(decimal.Decimal))
} else {
err = errUnsupportedType
break main
}
}
case cmdMul:
switch top[1].(type) {
case string:
switch top[0].(type) {
case int64:
if tmpInt, err = converter.ValueToInt(top[1]); err == nil {
bin = tmpInt * top[0].(int64)
}
case float64:
bin = ValueToFloat(top[1]) * top[0].(float64)
default:
err = errUnsupportedType
break main
}
case float64:
switch top[0].(type) {
case string, int64, float64:
bin = top[1].(float64) * ValueToFloat(top[0])
default:
err = errUnsupportedType
break main
}
case int64:
switch top[0].(type) {
case int64, string:
if tmpInt, err = converter.ValueToInt(top[0]); err == nil {
bin = top[1].(int64) * tmpInt
}
case float64:
bin = ValueToFloat(top[1]) * top[0].(float64)
default:
err = errUnsupportedType
break main
}
default:
if reflect.TypeOf(top[1]).String() == Decimal &&
reflect.TypeOf(top[0]).String() == Decimal {
bin = top[1].(decimal.Decimal).Mul(top[0].(decimal.Decimal))
} else {
err = errUnsupportedType
break main
}
}
case cmdDiv:
switch top[1].(type) {
case string:
switch v := top[0].(type) {
case int64:
if v == 0 {
err = errDivZero
break main
}
if tmpInt, err = converter.ValueToInt(top[1]); err == nil {
bin = tmpInt / v
}
case float64:
if v == 0 {
err = errDivZero
break main
}
bin = ValueToFloat(top[1]) / v
default:
err = errUnsupportedType
break main
}
case float64:
switch top[0].(type) {
case string, int64, float64:
vFloat := ValueToFloat(top[0])
if vFloat == 0 {
err = errDivZero
break main
}
bin = top[1].(float64) / vFloat
default:
err = errUnsupportedType
break main
}
case int64:
switch top[0].(type) {
case int64, string:
if tmpInt, err = converter.ValueToInt(top[0]); err == nil {
if tmpInt == 0 {
err = errDivZero
break main
}
bin = top[1].(int64) / tmpInt
}
case float64:
if top[0].(float64) == 0 {
err = errDivZero
break main
}
bin = ValueToFloat(top[1]) / top[0].(float64)
default:
err = errUnsupportedType
break main
}
default:
if reflect.TypeOf(top[1]).String() == Decimal &&
reflect.TypeOf(top[0]).String() == Decimal {
if top[0].(decimal.Decimal).Cmp(decimal.Zero) == 0 {
err = errDivZero
break main
}
bin = top[1].(decimal.Decimal).Div(top[0].(decimal.Decimal)).Floor()
} else {
err = errUnsupportedType
break main
}
}
case cmdAnd:
bin = valueToBool(top[1]) && valueToBool(top[0])
case cmdOr:
bin = valueToBool(top[1]) || valueToBool(top[0])
case cmdEqual, cmdNotEq:
if top[1] == nil || top[0] == nil {
bin = top[0] == top[1]
} else {
switch top[1].(type) {
case string:
switch top[0].(type) {
case int64:
if tmpInt, err = converter.ValueToInt(top[1]); err == nil {
bin = tmpInt == top[0].(int64)
}
case float64:
bin = ValueToFloat(top[1]) == top[0].(float64)
default:
if reflect.TypeOf(top[0]).String() == Decimal {
if tmpDec, err = ValueToDecimal(top[1]); err != nil {
break main
}
bin = tmpDec.Cmp(top[0].(decimal.Decimal)) == 0
} else {
bin = top[1].(string) == top[0].(string)
}
}
case float64:
bin = top[1].(float64) == ValueToFloat(top[0])
case int64:
switch top[0].(type) {
case int64:
bin = top[1].(int64) == top[0].(int64)
case float64:
bin = ValueToFloat(top[1]) == top[0].(float64)
default:
err = errUnsupportedType
break main
}
case bool:
switch top[0].(type) {
case bool:
bin = top[1].(bool) == top[0].(bool)
default:
err = errUnsupportedType
break main
}
default:
if tmpDec, err = ValueToDecimal(top[0]); err != nil {
break main
}
bin = top[1].(decimal.Decimal).Cmp(tmpDec) == 0
}
}
if cmd.Cmd == cmdNotEq {
bin = !bin.(bool)
}
case cmdLess, cmdNotLess:
switch top[1].(type) {
case string:
switch top[0].(type) {
case int64:
if tmpInt, err = converter.ValueToInt(top[1]); err == nil {
bin = tmpInt < top[0].(int64)
}
case float64:
bin = ValueToFloat(top[1]) < top[0].(float64)
default:
if reflect.TypeOf(top[0]).String() == Decimal {
if tmpDec, err = ValueToDecimal(top[1]); err != nil {
break main
}
bin = tmpDec.Cmp(top[0].(decimal.Decimal)) < 0
} else {
bin = top[1].(string) < top[0].(string)
}
}
case float64:
bin = top[1].(float64) < ValueToFloat(top[0])
case int64:
switch top[0].(type) {
case int64:
bin = top[1].(int64) < top[0].(int64)
case float64:
bin = ValueToFloat(top[1]) < top[0].(float64)
default:
err = errUnsupportedType
break main
}
default:
if tmpDec, err = ValueToDecimal(top[0]); err != nil {
break main
}
bin = top[1].(decimal.Decimal).Cmp(tmpDec) < 0
}
if cmd.Cmd == cmdNotLess {
bin = !bin.(bool)
}
case cmdGreat, cmdNotGreat:
switch top[1].(type) {
case string:
switch top[0].(type) {
case int64:
if tmpInt, err = converter.ValueToInt(top[1]); err == nil {
bin = tmpInt > top[0].(int64)
}
case float64:
bin = ValueToFloat(top[1]) > top[0].(float64)
default:
if reflect.TypeOf(top[0]).String() == Decimal {
if tmpDec, err = ValueToDecimal(top[1]); err != nil {
break main
}
bin = tmpDec.Cmp(top[0].(decimal.Decimal)) > 0
} else {
bin = top[1].(string) > top[0].(string)
}
}
case float64:
bin = top[1].(float64) > ValueToFloat(top[0])
case int64:
switch top[0].(type) {
case int64:
bin = top[1].(int64) > top[0].(int64)
case float64:
bin = ValueToFloat(top[1]) > top[0].(float64)
default:
err = errUnsupportedType
break main
}
default:
if tmpDec, err = ValueToDecimal(top[0]); err != nil {
break main
}
bin = top[1].(decimal.Decimal).Cmp(tmpDec) > 0
}
if cmd.Cmd == cmdNotGreat {
bin = !bin.(bool)
}
case cmdArrayInit:
initArray, err := rt.getResultArray(cmd.Value.([]mapItem))
if err != nil {
break main
}
rt.stack = append(rt.stack, initArray)
case cmdMapInit:
var initMap *types.Map
initMap, err = rt.getResultMap(cmd.Value.(*types.Map))
if err != nil {
break main
}
rt.stack = append(rt.stack, initMap)
default:
rt.vm.logger.WithFields(log.Fields{"vm_cmd": cmd.Cmd}).Error("Unknown command")
err = fmt.Errorf(`Unknown command %d`, cmd.Cmd)
}
if err != nil {
rt.err = err
break
}
if status == statusReturn || status == statusContinue || status == statusBreak {
break
}
if (cmd.Cmd >> 8) == 2 {
rt.stack[size-2] = bin
rt.stack = rt.stack[:size-1]
}
}
last := rt.blocks[len(rt.blocks)-1]
rt.blocks = rt.blocks[:len(rt.blocks)-1]
if status == statusReturn {
if last.Block.Type == ObjectType_Func {
lastResults := last.Block.Info.FuncInfo().Results
if len(lastResults) > len(rt.stack) {
var keyNames []string
for i := 0; i < len(lastResults); i++ {
keyNames = append(keyNames, lastResults[i].String())
}
err = fmt.Errorf("not enough arguments to return, need [%s]", strings.Join(keyNames, "|"))
return
}
for count := len(lastResults); count > 0; count-- {
rt.stack[start] = rt.stack[len(rt.stack)-count]
start++
}
status = statusNormal
} else {
return
}
}
rt.stack = rt.stack[:start]
if rt.cost <= 0 {
rt.vm.logger.WithFields(log.Fields{"type": consts.VMError}).Warn("runtime cost limit overflow")
err = fmt.Errorf(`runtime cost limit overflow`)
}
return
}
// Run executes CodeBlock with the specified parameters and extended variables and functions
func (rt *RunTime) Run(block *CodeBlock, params []any, extend map[string]any) (ret []any, err error) {
defer func() {
if r := recover(); r != nil {
//rt.vm.logger.WithFields(log.Fields{"type": consts.PanicRecoveredError, "error_info": r, "stack": string(debug.Stack())}).Error("runtime panic error")
err = fmt.Errorf(`runtime panic error: %v`, r)
}
}()
info := block.Info.FuncInfo()
rt.extend = extend
var (
genBlock bool
timer *time.Timer
)
if gen, ok := extend[Extend_gen_block]; ok {
genBlock = gen.(bool)
}
timeOver := func() {
rt.timeLimit = true
}
if genBlock {
timer = time.AfterFunc(time.Millisecond*time.Duration(extend[Extend_time_limit].(int64)), timeOver)
}
if _, err = rt.RunCode(block); err == nil {
off := len(rt.stack) - len(info.Results)
for i := 0; i < len(info.Results); i++ {
ret = append(ret, rt.stack[off+i])
}
}
if genBlock {
timer.Stop()
}
return
}
|
{
if _, ok := sysVars[name]; ok || strings.HasPrefix(name, Extend_loop) {
return true
}
return false
}
|
issue-17718-const-mut.rs
|
const
mut //~ ERROR: const globals cannot be mutable
//~^^ HELP you might want to declare a static instead
FOO: usize = 3;
fn main()
|
{
}
|
|
validate_3.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// compile-flags: -Z verbose -Z mir-emit-validate=1
struct Test {
x: i32
}
fn foo(_x: &i32) {}
fn main() {
// These internal unsafe functions should have no effect on the code generation.
unsafe fn _unused1() {}
fn _unused2(x: *const i32) -> i32 { unsafe { *x }}
let t = Test { x: 0 };
let t = &t;
foo(&t.x);
}
// END RUST SOURCE
|
// scope 1 {
// let _1: Test;
// scope 3 {
// let _2: &ReErased Test;
// }
// scope 4 {
// }
// }
// scope 2 {
// }
// let mut _3: ();
// let mut _4: &ReErased i32;
// let mut _5: &ReErased i32;
// bb0: {
// StorageLive(_1);
// _1 = Test { x: const 0i32 };
// StorageLive(_2);
// Validate(Suspend(ReScope(Remainder(BlockRemainder { block: ItemLocalId(20), first_statement_index: 3 }))), [_1: Test]);
// _2 = &ReErased _1;
// Validate(Acquire, [(*_2): Test/ReScope(Remainder(BlockRemainder { block: ItemLocalId(20), first_statement_index: 3 })) (imm)]);
// StorageLive(_4);
// StorageLive(_5);
// Validate(Suspend(ReScope(Node(ItemLocalId(18)))), [((*_2).0: i32): i32/ReScope(Remainder(BlockRemainder { block: ItemLocalId(20), first_statement_index: 3 })) (imm)]);
// _5 = &ReErased ((*_2).0: i32);
// Validate(Acquire, [(*_5): i32/ReScope(Node(ItemLocalId(18))) (imm)]);
// Validate(Suspend(ReScope(Node(ItemLocalId(18)))), [(*_5): i32/ReScope(Node(ItemLocalId(18))) (imm)]);
// _4 = &ReErased (*_5);
// Validate(Acquire, [(*_4): i32/ReScope(Node(ItemLocalId(18))) (imm)]);
// Validate(Release, [_3: (), _4: &ReScope(Node(ItemLocalId(18))) i32]);
// _3 = const foo(move _4) -> bb1;
// }
// bb1: {
// Validate(Acquire, [_3: ()]);
// EndRegion(ReScope(Node(ItemLocalId(18))));
// StorageDead(_4);
// StorageDead(_5);
// _0 = ();
// EndRegion(ReScope(Remainder(BlockRemainder { block: ItemLocalId(20), first_statement_index: 3 })));
// StorageDead(_2);
// StorageDead(_1);
// return;
// }
// }
// END rustc.main.EraseRegions.after.mir
|
// START rustc.main.EraseRegions.after.mir
// fn main() -> (){
// let mut _0: ();
|
pwm.rs
|
#![no_std]
#![no_main]
#![feature(type_alias_impl_trait)]
use defmt::*;
use embassy::executor::Spawner;
use embassy::time::{Duration, Timer};
use embassy_nrf::pwm::{Prescaler, SimplePwm};
use embassy_nrf::Peripherals;
use defmt_rtt as _; // global logger
use panic_probe as _;
// for i in range(1024): print(int((math.sin(i/512*math.pi)*0.4+0.5)**2*32767), ', ', end='')
static DUTY: [u16; 1024] = [
8191, 8272, 8353, 8434, 8516, 8598, 8681, 8764, 8847, 8931, 9015, 9099, 9184, 9269, 9354, 9440,
9526, 9613, 9700, 9787, 9874, 9962, 10050, 10139, 10227, 10316, 10406, 10495, 10585, 10675,
10766, 10857, 10948, 11039, 11131, 11223, 11315, 11407, 11500, 11592, 11685, 11779, 11872,
11966, 12060, 12154, 12248, 12343, 12438, 12533, 12628, 12723, 12818, 12914, 13010, 13106,
13202, 13298, 13394, 13491, 13587, 13684, 13781, 13878, 13975, 14072, 14169, 14266, 14364,
14461, 14558, 14656, 14754, 14851, 14949, 15046, 15144, 15242, 15339, 15437, 15535, 15632,
15730, 15828, 15925, 16023, 16120, 16218, 16315, 16412, 16510, 16607, 16704, 16801, 16898,
16995, 17091, 17188, 17284, 17380, 17477, 17572, 17668, 17764, 17859, 17955, 18050, 18145,
18239, 18334, 18428, 18522, 18616, 18710, 18803, 18896, 18989, 19082, 19174, 19266, 19358,
19449, 19540, 19631, 19722, 19812, 19902, 19991, 20081, 20169, 20258, 20346, 20434, 20521,
20608, 20695, 20781, 20867, 20952, 21037, 21122, 21206, 21290, 21373, 21456, 21538, 21620,
21701, 21782, 21863, 21943, 22022, 22101, 22179, 22257, 22335, 22412, 22488, 22564, 22639,
22714, 22788, 22861, 22934, 23007, 23079, 23150, 23220, 23290, 23360, 23429, 23497, 23564,
23631, 23698, 23763, 23828, 23892, 23956, 24019, 24081, 24143, 24204, 24264, 24324, 24383,
24441, 24499, 24555, 24611, 24667, 24721, 24775, 24828, 24881, 24933, 24983, 25034, 25083,
25132, 25180, 25227, 25273, 25319, 25363, 25407, 25451, 25493, 25535, 25575, 25615, 25655,
25693, 25731, 25767, 25803, 25838, 25873, 25906, 25939, 25971, 26002, 26032, 26061, 26089,
26117, 26144, 26170, 26195, 26219, 26242, 26264, 26286, 26307, 26327, 26346, 26364, 26381,
26397, 26413, 26427, 26441, 26454, 26466, 26477, 26487, 26496, 26505, 26512, 26519, 26525,
26530, 26534, 26537, 26539, 26540, 26541, 26540, 26539, 26537, 26534, 26530, 26525, 26519,
26512, 26505, 26496, 26487, 26477, 26466, 26454, 26441, 26427, 26413, 26397, 26381, 26364,
26346, 26327, 26307, 26286, 26264, 26242, 26219, 26195, 26170, 26144, 26117, 26089, 26061,
26032, 26002, 25971, 25939, 25906, 25873, 25838, 25803, 25767, 25731, 25693, 25655, 25615,
25575, 25535, 25493, 25451, 25407, 25363, 25319, 25273, 25227, 25180, 25132, 25083, 25034,
24983, 24933, 24881, 24828, 24775, 24721, 24667, 24611, 24555, 24499, 24441, 24383, 24324,
24264, 24204, 24143, 24081, 24019, 23956, 23892, 23828, 23763, 23698, 23631, 23564, 23497,
23429, 23360, 23290, 23220, 23150, 23079, 23007, 22934, 22861, 22788, 22714, 22639, 22564,
22488, 22412, 22335, 22257, 22179, 22101, 22022, 21943, 21863, 21782, 21701, 21620, 21538,
21456, 21373, 21290, 21206, 21122, 21037, 20952, 20867, 20781, 20695, 20608, 20521, 20434,
20346, 20258, 20169, 20081, 19991, 19902, 19812, 19722, 19631, 19540, 19449, 19358, 19266,
19174, 19082, 18989, 18896, 18803, 18710, 18616, 18522, 18428, 18334, 18239, 18145, 18050,
17955, 17859, 17764, 17668, 17572, 17477, 17380, 17284, 17188, 17091, 16995, 16898, 16801,
16704, 16607, 16510, 16412, 16315, 16218, 16120, 16023, 15925, 15828, 15730, 15632, 15535,
15437, 15339, 15242, 15144, 15046, 14949, 14851, 14754, 14656, 14558, 14461, 14364, 14266,
14169, 14072, 13975, 13878, 13781, 13684, 13587, 13491, 13394, 13298, 13202, 13106, 13010,
12914, 12818, 12723, 12628, 12533, 12438, 12343, 12248, 12154, 12060, 11966, 11872, 11779,
11685, 11592, 11500, 11407, 11315, 11223, 11131, 11039, 10948, 10857, 10766, 10675, 10585,
10495, 10406, 10316, 10227, 10139, 10050, 9962, 9874, 9787, 9700, 9613, 9526, 9440, 9354, 9269,
9184, 9099, 9015, 8931, 8847, 8764, 8681, 8598, 8516, 8434, 8353, 8272, 8191, 8111, 8031, 7952,
7873, 7794, 7716, 7638, 7561, 7484, 7407, 7331, 7255, 7180, 7105, 7031, 6957, 6883, 6810, 6738,
6665, 6594, 6522, 6451, 6381, 6311, 6241, 6172, 6104, 6036, 5968, 5901, 5834, 5767, 5702, 5636,
5571, 5507, 5443, 5379, 5316, 5253, 5191, 5130, 5068, 5008, 4947, 4888, 4828, 4769, 4711, 4653,
4596, 4539, 4482, 4426, 4371, 4316, 4261, 4207, 4153, 4100, 4047, 3995, 3943, 3892, 3841, 3791,
3741, 3691, 3642, 3594, 3546, 3498, 3451, 3404, 3358, 3312, 3267, 3222, 3178, 3134, 3090, 3047,
3005, 2962, 2921, 2879, 2839, 2798, 2758, 2719, 2680, 2641, 2603, 2565, 2528, 2491, 2454, 2418,
2382, 2347, 2312, 2278, 2244, 2210, 2177, 2144, 2112, 2080, 2048, 2017, 1986, 1956, 1926, 1896,
1867, 1838, 1810, 1781, 1754, 1726, 1699, 1673, 1646, 1620, 1595, 1570, 1545, 1520, 1496, 1472,
1449, 1426, 1403, 1380, 1358, 1336, 1315, 1294, 1273, 1252, 1232, 1212, 1192, 1173, 1154, 1135,
1117, 1099, 1081, 1063, 1046, 1029, 1012, 996, 980, 964, 948, 933, 918, 903, 888, 874, 860,
846, 833, 819, 806, 793, 781, 768, 756, 744, 733, 721, 710, 699, 688, 677, 667, 657, 647, 637,
627, 618, 609, 599, 591, 582, 574, 565, 557, 549, 541, 534, 526, 519, 512, 505, 498, 492, 485,
479, 473, 467, 461, 455, 450, 444, 439, 434, 429, 424, 419, 415, 410, 406, 402, 398, 394, 390,
386, 383, 379, 376, 373, 370, 367, 364, 361, 359, 356, 354, 351, 349, 347, 345, 343, 342, 340,
338, 337, 336, 334, 333, 332, 331, 330, 330, 329, 328, 328, 328, 327, 327, 327, 327, 327, 328,
328, 328, 329, 330, 330, 331, 332, 333, 334, 336, 337, 338, 340, 342, 343, 345, 347, 349, 351,
354, 356, 359, 361, 364, 367, 370, 373, 376, 379, 383, 386, 390, 394, 398, 402, 406, 410, 415,
419, 424, 429, 434, 439, 444, 450, 455, 461, 467, 473, 479, 485, 492, 498, 505, 512, 519, 526,
534, 541, 549, 557, 565, 574, 582, 591, 599, 609, 618, 627, 637, 647, 657, 667, 677, 688, 699,
710, 721, 733, 744, 756, 768, 781, 793, 806, 819, 833, 846, 860, 874, 888, 903, 918, 933, 948,
964, 980, 996, 1012, 1029, 1046, 1063, 1081, 1099, 1117, 1135, 1154, 1173, 1192, 1212, 1232,
1252, 1273, 1294, 1315, 1336, 1358, 1380, 1403, 1426, 1449, 1472, 1496, 1520, 1545, 1570, 1595,
1620, 1646, 1673, 1699, 1726, 1754, 1781, 1810, 1838, 1867, 1896, 1926, 1956, 1986, 2017, 2048,
2080, 2112, 2144, 2177, 2210, 2244, 2278, 2312, 2347, 2382, 2418, 2454, 2491, 2528, 2565, 2603,
2641, 2680, 2719, 2758, 2798, 2839, 2879, 2921, 2962, 3005, 3047, 3090, 3134, 3178, 3222, 3267,
3312, 3358, 3404, 3451, 3498, 3546, 3594, 3642, 3691, 3741, 3791, 3841, 3892, 3943, 3995, 4047,
4100, 4153, 4207, 4261, 4316, 4371, 4426, 4482, 4539, 4596, 4653, 4711, 4769, 4828, 4888, 4947,
5008, 5068, 5130, 5191, 5253, 5316, 5379, 5443, 5507, 5571, 5636, 5702, 5767, 5834, 5901, 5968,
6036, 6104, 6172, 6241, 6311, 6381, 6451, 6522, 6594, 6665, 6738, 6810, 6883, 6957, 7031, 7105,
7180, 7255, 7331, 7407, 7484, 7561, 7638, 7716, 7794, 7873, 7952, 8031, 8111,
];
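// Note (added for clarity): DUTY holds one period of the squared, offset sine wave produced by
// the Python one-liner above, scaled to the 0..=32767 range passed to set_max_duty below; the
// four channels read it with 0/256/512/768 sample phase offsets to stagger the fades.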
#[embassy::main]
async fn main(_spawner: Spawner, p: Peripherals)
|
{
let mut pwm = SimplePwm::new_4ch(p.PWM0, p.P0_13, p.P0_14, p.P0_16, p.P0_15);
pwm.set_prescaler(Prescaler::Div1);
pwm.set_max_duty(32767);
info!("pwm initialized!");
let mut i = 0;
loop {
i += 1;
pwm.set_duty(0, DUTY[i % 1024]);
pwm.set_duty(1, DUTY[(i + 256) % 1024]);
pwm.set_duty(2, DUTY[(i + 512) % 1024]);
pwm.set_duty(3, DUTY[(i + 768) % 1024]);
Timer::after(Duration::from_millis(3)).await;
}
}
|
|
server.go
|
package service
var Musics = map[int]map[int]string{
1: map[int]string{
1: "d",
},
}
var Value uint32 = 0
// Server ties together the GATT server and the DFPlayer connection.
type Server struct {
stopChannel chan bool
// not needed?
gatt *GATTServer
df *DFPlayerMini
}
func NewServer() *Server {
s := &Server{}
s.stopChannel = make(chan bool)
return s
}
func (s *Server) StartGATTServer() {
gatt := &GATTServer{}
gatt.Start(s)
s.gatt = gatt
if s.df != nil {
s.df.Close()
}
}
func (s *Server) StartUARTService() {
df := NewDFPlayer()
df.Connect(s)
s.df = df
}
func Close() {
|
unc (*Server) GetMusic() {
}
func (*Server) GetValue() uint32 {
return Value
}
func (*Server) SetValue(v uint32) {
Value = v
}
|
}
f
|
poll.rs
|
use cosmwasm_std::{
attr, Api, Binary, CosmosMsg, Decimal, Deps, DepsMut, Env, MessageInfo, Response, StdError,
StdResult, WasmMsg,
};
use spectrum_protocol::common::OrderBy;
use spectrum_protocol::platform::{
PollExecuteMsg, PollInfo, PollStatus, PollsResponse, VoteOption, VoterInfo, VotersResponse,
};
use crate::state::{
poll_indexer_store, poll_store, poll_voter_store, read_board, read_config, read_poll,
read_poll_voter, read_poll_voters, read_polls, read_state, state_store, Poll,
};
/// create a new poll
pub fn poll_start(
deps: DepsMut,
env: Env,
info: MessageInfo,
title: String,
description: String,
link: Option<String>,
execute_msgs: Vec<PollExecuteMsg>,
) -> StdResult<Response> {
validate_title(&title)?;
validate_description(&description)?;
validate_link(&link)?;
let sender_address_raw = deps.api.addr_canonicalize(info.sender.as_str())?;
let key = sender_address_raw.as_slice();
let weight = read_board(deps.storage, key);
if weight == 0 {
return Err(StdError::generic_err("unauthorized"));
}
let config = read_config(deps.storage)?;
let mut state = state_store(deps.storage).load()?;
let poll_id = state.poll_count + 1;
    // Increase poll count
state.poll_count += 1;
let new_poll = Poll {
id: poll_id,
creator: deps.api.addr_canonicalize(info.sender.as_str())?,
status: PollStatus::in_progress,
yes_votes: 0u32,
no_votes: 0u32,
end_height: env.block.height + config.voting_period,
title,
description,
link,
execute_msgs,
total_balance_at_end_poll: None,
};
poll_store(deps.storage).save(&poll_id.to_be_bytes(), &new_poll)?;
poll_indexer_store(deps.storage, &PollStatus::in_progress)
.save(&poll_id.to_be_bytes(), &true)?;
state_store(deps.storage).save(&state)?;
Ok(Response::new().add_attributes(vec![
attr("action", "create_poll"),
attr("creator", deps.api.addr_humanize(&new_poll.creator)?),
attr("poll_id", poll_id.to_string()),
attr("end_height", new_poll.end_height.to_string()),
|
}
const MIN_TITLE_LENGTH: usize = 4;
const MAX_TITLE_LENGTH: usize = 64;
const MIN_DESC_LENGTH: usize = 4;
const MAX_DESC_LENGTH: usize = 256;
const MIN_LINK_LENGTH: usize = 12;
const MAX_LINK_LENGTH: usize = 128;
/// validate_title returns an error if the title is invalid
fn validate_title(title: &str) -> StdResult<()> {
if title.len() < MIN_TITLE_LENGTH {
Err(StdError::generic_err("Title too short"))
} else if title.len() > MAX_TITLE_LENGTH {
Err(StdError::generic_err("Title too long"))
} else {
Ok(())
}
}
/// validate_description returns an error if the description is invalid
fn validate_description(description: &str) -> StdResult<()> {
if description.len() < MIN_DESC_LENGTH {
Err(StdError::generic_err("Description too short"))
} else if description.len() > MAX_DESC_LENGTH {
Err(StdError::generic_err("Description too long"))
} else {
Ok(())
}
}
/// validate_link returns an error if the link is invalid
fn validate_link(link: &Option<String>) -> StdResult<()> {
if let Some(link) = link {
if link.len() < MIN_LINK_LENGTH {
Err(StdError::generic_err("Link too short"))
} else if link.len() > MAX_LINK_LENGTH {
Err(StdError::generic_err("Link too long"))
} else {
Ok(())
}
} else {
Ok(())
}
}
pub fn poll_vote(
deps: DepsMut,
env: Env,
info: MessageInfo,
poll_id: u64,
vote: VoteOption,
) -> StdResult<Response> {
let sender_address_raw = deps.api.addr_canonicalize(info.sender.as_str())?;
let state = read_state(deps.storage)?;
if poll_id == 0 || state.poll_count < poll_id {
return Err(StdError::generic_err("Poll does not exist"));
}
let mut a_poll = poll_store(deps.storage).load(&poll_id.to_be_bytes())?;
if a_poll.status != PollStatus::in_progress || env.block.height > a_poll.end_height {
return Err(StdError::generic_err("Poll is not in progress"));
}
    // Check whether the voter already has a vote on the poll
if read_poll_voter(deps.storage, poll_id, &sender_address_raw).is_ok() {
return Err(StdError::generic_err("User has already voted."));
}
let key = sender_address_raw.as_slice();
let weight = read_board(deps.storage, key);
if weight == 0 {
return Err(StdError::generic_err("unauthorized"));
}
// update tally info
if VoteOption::yes == vote {
a_poll.yes_votes += weight;
} else {
a_poll.no_votes += weight;
}
let vote_info = VoterInfo {
vote,
balance: weight,
};
    // store poll voter and update poll data
poll_voter_store(deps.storage, poll_id).save(sender_address_raw.as_slice(), &vote_info)?;
poll_store(deps.storage).save(&poll_id.to_be_bytes(), &a_poll)?;
Ok(Response::new().add_attributes(vec![
attr("action", "cast_vote"),
attr("poll_id", poll_id.to_string()),
attr("amount", weight.to_string()),
attr("voter", info.sender),
attr("vote_option", vote_info.vote.to_string()),
]))
}
/*
* Ends a poll.
*/
pub fn poll_end(deps: DepsMut, env: Env, poll_id: u64) -> StdResult<Response> {
let mut a_poll = poll_store(deps.storage).load(&poll_id.to_be_bytes())?;
if a_poll.status != PollStatus::in_progress {
return Err(StdError::generic_err("Poll is not in progress"));
}
let no = a_poll.no_votes;
let yes = a_poll.yes_votes;
let all_votes = yes + no;
let config = read_config(deps.storage)?;
let state = read_state(deps.storage)?;
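    // The poll may be ended before `end_height` only if either side's share of the
    // total voting weight has already reached the configured threshold; otherwise
    // the voting period must have expired.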
if a_poll.end_height > env.block.height
&& Decimal::from_ratio(yes, state.total_weight) < config.threshold
&& Decimal::from_ratio(no, state.total_weight) < config.threshold
{
return Err(StdError::generic_err("Voting period has not expired"));
}
let quorum = Decimal::from_ratio(all_votes, state.total_weight);
let (passed, rejected_reason) = if quorum.is_zero() || quorum < config.quorum {
// Quorum: More than quorum of the total staked tokens at the end of the voting
// period need to have participated in the vote.
(false, "Quorum not reached")
} else if Decimal::from_ratio(yes, all_votes) < config.threshold {
(false, "Threshold not reached")
} else {
        // Threshold: More than 50% of the tokens that participated in the vote
// (after excluding “Abstain” votes) need to have voted in favor of the proposal (“Yes”).
(true, "")
};
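    // Illustration (hypothetical numbers): with total_weight = 100, quorum = 10% and
    // threshold = 50%, a poll with 8 yes / 4 no has 12% participation and a ~67% yes
    // share, so it passes; 5 yes / 7 no would be rejected with "Threshold not reached".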
// Update poll status
a_poll.status = if passed {
PollStatus::passed
} else {
PollStatus::rejected
};
a_poll.total_balance_at_end_poll = Some(state.total_weight);
if env.block.height < a_poll.end_height {
a_poll.end_height = env.block.height;
}
poll_store(deps.storage).save(&poll_id.to_be_bytes(), &a_poll)?;
// Update poll indexer
poll_indexer_store(deps.storage, &PollStatus::in_progress).remove(&a_poll.id.to_be_bytes());
poll_indexer_store(deps.storage, &a_poll.status).save(&a_poll.id.to_be_bytes(), &true)?;
Ok(Response::new().add_attributes(vec![
attr("action", "end_poll"),
attr("poll_id", &poll_id.to_string()),
attr("rejected_reason", rejected_reason),
attr("passed", &passed.to_string()),
]))
}
/*
* Execute a msg of passed poll.
*/
pub fn poll_execute(deps: DepsMut, env: Env, poll_id: u64) -> StdResult<Response> {
let config = read_config(deps.storage)?;
let mut a_poll = poll_store(deps.storage).load(&poll_id.to_be_bytes())?;
if a_poll.status != PollStatus::passed {
return Err(StdError::generic_err("Poll is not in passed status"));
}
if a_poll.end_height + config.effective_delay > env.block.height {
return Err(StdError::generic_err("Effective delay has not expired"));
}
if a_poll.execute_msgs.is_empty() {
return Err(StdError::generic_err("The poll does not have execute_data"));
}
poll_indexer_store(deps.storage, &PollStatus::passed).remove(&poll_id.to_be_bytes());
poll_indexer_store(deps.storage, &PollStatus::executed).save(&poll_id.to_be_bytes(), &true)?;
a_poll.status = PollStatus::executed;
poll_store(deps.storage).save(&poll_id.to_be_bytes(), &a_poll)?;
let messages: Vec<CosmosMsg> = a_poll.execute_msgs.into_iter().map(match_msg).collect();
Ok(Response::new().add_messages(messages).add_attributes(vec![
attr("action", "execute_poll"),
attr("poll_id", poll_id.to_string()),
]))
}
fn match_msg(msg: PollExecuteMsg) -> CosmosMsg {
match msg {
PollExecuteMsg::execute { contract, msg } => CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: contract,
msg: Binary(msg.into_bytes()),
funds: vec![],
}),
}
}
/// ExpirePoll is used to make the poll as expired state for querying purpose
pub fn poll_expire(deps: DepsMut, env: Env, poll_id: u64) -> StdResult<Response> {
let config = read_config(deps.storage)?;
let mut a_poll = poll_store(deps.storage).load(&poll_id.to_be_bytes())?;
if a_poll.status != PollStatus::passed {
return Err(StdError::generic_err("Poll is not in passed status"));
}
if a_poll.execute_msgs.is_empty() {
return Err(StdError::generic_err(
"Cannot make a text proposal to expired state",
));
}
if a_poll.end_height + config.expiration_period > env.block.height {
return Err(StdError::generic_err("Expire height has not been reached"));
}
poll_indexer_store(deps.storage, &PollStatus::passed).remove(&poll_id.to_be_bytes());
poll_indexer_store(deps.storage, &PollStatus::expired).save(&poll_id.to_be_bytes(), &true)?;
a_poll.status = PollStatus::expired;
poll_store(deps.storage).save(&poll_id.to_be_bytes(), &a_poll)?;
Ok(Response::new().add_attributes(vec![
attr("action", "expire_poll"),
attr("poll_id", poll_id.to_string()),
]))
}
fn map_poll(poll: Poll, api: &dyn Api) -> StdResult<PollInfo> {
Ok(PollInfo {
id: poll.id,
creator: api.addr_humanize(&poll.creator).unwrap().to_string(),
status: poll.status.clone(),
end_height: poll.end_height,
title: poll.title,
description: poll.description,
link: poll.link,
execute_msgs: poll.execute_msgs,
yes_votes: poll.yes_votes,
no_votes: poll.no_votes,
total_balance_at_end_poll: poll.total_balance_at_end_poll,
})
}
pub fn query_poll(deps: Deps, poll_id: u64) -> StdResult<PollInfo> {
let poll = read_poll(deps.storage, &poll_id.to_be_bytes())?;
if poll.is_none() {
return Err(StdError::generic_err("Poll does not exist"));
}
map_poll(poll.unwrap(), deps.api)
}
pub fn query_polls(
deps: Deps,
filter: Option<PollStatus>,
start_after: Option<u64>,
limit: Option<u32>,
order_by: Option<OrderBy>,
) -> StdResult<PollsResponse> {
let polls = read_polls(deps.storage, filter, start_after, limit, order_by)?;
let poll_responses: StdResult<Vec<PollInfo>> = polls
.into_iter()
.map(|poll| map_poll(poll, deps.api))
.collect();
Ok(PollsResponse {
polls: poll_responses?,
})
}
pub fn query_voters(
deps: Deps,
poll_id: u64,
start_after: Option<String>,
limit: Option<u32>,
order_by: Option<OrderBy>,
) -> StdResult<VotersResponse> {
    let poll = match read_poll(deps.storage, &poll_id.to_be_bytes())? {
        Some(poll) => poll,
        None => return Err(StdError::generic_err("Poll does not exist")),
    };
let voters = if poll.status != PollStatus::in_progress {
vec![]
} else {
read_poll_voters(
deps.storage,
poll_id,
match start_after {
Some(sa) => Some(deps.api.addr_canonicalize(&sa)?),
None => None,
},
limit,
order_by,
)?
};
let voters_response: StdResult<Vec<(String, VoterInfo)>> = voters
.into_iter()
.map(|voter_info| {
Ok((
deps.api.addr_humanize(&voter_info.0)?.to_string(),
VoterInfo {
vote: voter_info.1.vote,
balance: voter_info.1.balance,
},
))
})
.collect();
Ok(VotersResponse {
voters: voters_response?,
})
}
|
]))
|
routing.rs
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/// Tests of capability routing in ComponentManager.
///
/// Most routing tests should be defined as methods on the ::routing_test_helpers::CommonRoutingTest
/// type and should be run both in this file (using a CommonRoutingTest<RoutingTestBuilder>) and in
/// the cm_fidl_analyzer_tests crate (using a specialization of CommonRoutingTest for the static
/// routing analyzer). This ensures that the static analyzer's routing verification is consistent
/// with ComponentManager's intended routing behavior.
///
/// However, tests of behavior that is out-of-scope for the static analyzer (e.g. routing to/from
/// dynamic component instances) should be defined here.
use {
crate::{
capability::{
CapabilityProvider, CapabilitySource, ComponentCapability, InternalCapability,
OptionalTask,
},
channel,
config::{AllowlistEntry, CapabilityAllowlistKey, CapabilityAllowlistSource},
framework::REALM_SERVICE,
model::{
actions::{
ActionSet, DestroyChildAction, PurgeAction, PurgeChildAction, ShutdownAction,
},
error::ModelError,
events::registry::EventSubscription,
hooks::{Event, EventPayload, EventType, Hook, HooksRegistration},
rights,
routing::{RouteRequest, RouteSource, RoutingError},
testing::{routing_test_helpers::*, test_helpers::*},
},
},
anyhow::Error,
async_trait::async_trait,
cm_rust::*,
cm_rust_testing::*,
fidl::endpoints::ServerEnd,
fidl_fidl_examples_echo::{self as echo},
fidl_fuchsia_component_runner as fcrunner, fidl_fuchsia_mem as fmem, fidl_fuchsia_sys2 as fsys,
fuchsia_async as fasync, fuchsia_zircon as zx,
futures::{join, lock::Mutex, StreamExt, TryStreamExt},
log::*,
maplit::hashmap,
matches::assert_matches,
moniker::{AbsoluteMoniker, ExtendedMoniker},
routing::{error::ComponentInstanceError, route_capability},
routing_test_helpers::{
default_service_capability, instantiate_common_routing_tests, RoutingTestModel,
},
std::{
collections::HashSet,
convert::{TryFrom, TryInto},
path::PathBuf,
sync::{Arc, Weak},
},
vfs::pseudo_directory,
};
instantiate_common_routing_tests! { RoutingTestBuilder }
/// a
/// \
/// b
///
/// b: uses framework service /svc/fuchsia.sys2.Realm
#[fuchsia::test]
async fn use_framework_service() {
pub struct MockRealmCapabilityProvider {
scope_moniker: AbsoluteMoniker,
host: MockRealmCapabilityHost,
}
impl MockRealmCapabilityProvider {
pub fn new(scope_moniker: AbsoluteMoniker, host: MockRealmCapabilityHost) -> Self {
Self { scope_moniker, host }
}
}
#[async_trait]
impl CapabilityProvider for MockRealmCapabilityProvider {
async fn open(
self: Box<Self>,
_flags: u32,
_open_mode: u32,
_relative_path: PathBuf,
server_end: &mut zx::Channel,
) -> Result<OptionalTask, ModelError> {
let server_end = channel::take_channel(server_end);
let stream = ServerEnd::<fsys::RealmMarker>::new(server_end)
.into_stream()
.expect("could not convert channel into stream");
let scope_moniker = self.scope_moniker.clone();
let host = self.host.clone();
Ok(fasync::Task::spawn(async move {
if let Err(e) = host.serve(scope_moniker, stream).await {
// TODO: Set an epitaph to indicate this was an unexpected error.
warn!("serve_realm failed: {}", e);
}
})
.into())
}
}
#[async_trait]
impl Hook for MockRealmCapabilityHost {
async fn on(self: Arc<Self>, event: &Event) -> Result<(), ModelError> {
if let Ok(EventPayload::CapabilityRouted {
source: CapabilitySource::Framework { capability, component },
capability_provider,
}) = &event.result
{
let mut capability_provider = capability_provider.lock().await;
*capability_provider = self
.on_scoped_framework_capability_routed_async(
component.moniker.clone(),
&capability,
capability_provider.take(),
)
.await?;
}
Ok(())
}
}
#[derive(Clone)]
pub struct MockRealmCapabilityHost {
/// List of calls to `BindChild` with component's relative moniker.
bind_calls: Arc<Mutex<Vec<String>>>,
}
impl MockRealmCapabilityHost {
pub fn new() -> Self {
Self { bind_calls: Arc::new(Mutex::new(vec![])) }
}
pub fn bind_calls(&self) -> Arc<Mutex<Vec<String>>> {
self.bind_calls.clone()
}
async fn serve(
&self,
scope_moniker: AbsoluteMoniker,
mut stream: fsys::RealmRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await? {
match request {
fsys::RealmRequest::BindChild { responder, .. } => {
self.bind_calls.lock().await.push(
scope_moniker
.path()
.last()
.expect("did not expect root component")
.name()
.to_string(),
);
responder.send(&mut Ok(()))?;
}
_ => {}
}
}
Ok(())
}
pub async fn on_scoped_framework_capability_routed_async<'a>(
&'a self,
scope_moniker: AbsoluteMoniker,
capability: &'a InternalCapability,
capability_provider: Option<Box<dyn CapabilityProvider>>,
) -> Result<Option<Box<dyn CapabilityProvider>>, ModelError> {
// If some other capability has already been installed, then there's nothing to
// do here.
if capability.matches_protocol(&REALM_SERVICE) {
Ok(Some(Box::new(MockRealmCapabilityProvider::new(
scope_moniker.clone(),
self.clone(),
)) as Box<dyn CapabilityProvider>))
} else {
Ok(capability_provider)
}
}
}
let components = vec![
("a", ComponentDeclBuilder::new().add_lazy_child("b").build()),
(
"b",
ComponentDeclBuilder::new()
.use_(UseDecl::Protocol(UseProtocolDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Framework,
source_name: "fuchsia.sys2.Realm".into(),
target_path: CapabilityPath::try_from("/svc/fuchsia.sys2.Realm").unwrap(),
}))
.build(),
),
];
let test = RoutingTest::new("a", components).await;
// RoutingTest installs the real RealmCapabilityHost. Installing the
// MockRealmCapabilityHost here overrides the previously installed one.
let realm_service_host = Arc::new(MockRealmCapabilityHost::new());
test.model
.root
.hooks
.install(vec![HooksRegistration::new(
"MockRealmCapabilityHost",
vec![EventType::CapabilityRouted],
Arc::downgrade(&realm_service_host) as Weak<dyn Hook>,
)])
.await;
test.check_use_realm(vec!["b:0"].into(), realm_service_host.bind_calls()).await;
}
/// a
/// \
/// b
///
/// a: offers service /svc/foo from self as /svc/bar
/// b: uses service /svc/bar as /svc/hippo
///
/// This test verifies that the parent, if subscribed to the CapabilityRequested event, will receive
/// it when the child connects to /svc/hippo.
#[fuchsia::test]
async fn capability_requested_event_at_parent() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.protocol(ProtocolDeclBuilder::new("foo_svc").build())
.offer(OfferDecl::Protocol(OfferProtocolDecl {
source: OfferSource::Self_,
source_name: "foo_svc".into(),
target_name: "bar_svc".into(),
target: OfferTarget::Child("b".to_string()),
dependency_type: DependencyType::Strong,
}))
.use_(UseDecl::Protocol(UseProtocolDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Parent,
source_name: "fuchsia.sys2.EventSource".try_into().unwrap(),
target_path: "/svc/fuchsia.sys2.EventSource".try_into().unwrap(),
}))
.use_(UseDecl::Event(UseEventDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Framework,
source_name: "capability_requested".into(),
target_name: "capability_requested".into(),
filter: Some(hashmap!{"name".to_string() => DictionaryValue::Str("foo_svc".to_string())}),
mode: cm_rust::EventMode::Async,
}))
.use_(UseDecl::Event(UseEventDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Framework,
source_name: "resolved".into(),
target_name: "resolved".into(),
filter: None,
mode: cm_rust::EventMode::Sync,
}))
.use_(UseDecl::EventStream(UseEventStreamDecl {
name: CapabilityName::try_from("StartComponentTree").unwrap(),
subscriptions: vec![cm_rust::EventSubscription {
event_name: "resolved".into(),
mode: cm_rust::EventMode::Sync,
}],
}))
.add_lazy_child("b")
.build(),
),
(
"b",
ComponentDeclBuilder::new()
.use_(UseDecl::Protocol(UseProtocolDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Parent,
source_name: "bar_svc".into(),
target_path: CapabilityPath::try_from("/svc/hippo").unwrap(),
}))
.build(),
),
];
let test = RoutingTest::new("a", components).await;
let namespace_root = test.bind_and_get_namespace(AbsoluteMoniker::root()).await;
let mut event_stream = capability_util::subscribe_to_events(
&namespace_root,
&CapabilityPath::try_from("/svc/fuchsia.sys2.EventSource").unwrap(),
vec![EventSubscription::new("capability_requested".into(), EventMode::Async)],
)
.await
.unwrap();
let namespace_b = test.bind_and_get_namespace(vec!["b:0"].into()).await;
let _echo_proxy = capability_util::connect_to_svc_in_namespace::<echo::EchoMarker>(
&namespace_b,
&"/svc/hippo".try_into().unwrap(),
)
.await;
let event = match event_stream.next().await {
Some(Ok(fsys::EventStreamRequest::OnEvent { event, .. })) => event,
_ => panic!("Event not found"),
};
// 'b' is the target and 'a' is receiving the event so the relative moniker
// is './b:0'.
assert_matches!(&event,
fsys::Event {
header: Some(fsys::EventHeader {
moniker: Some(moniker), .. }), ..
} if *moniker == "./b:0".to_string() );
assert_matches!(&event,
fsys::Event {
header: Some(fsys::EventHeader {
component_url: Some(component_url), .. }), ..
} if *component_url == "test:///b".to_string() );
assert_matches!(&event,
fsys::Event {
event_result: Some(
fsys::EventResult::Payload(
fsys::EventPayload::CapabilityRequested(
fsys::CapabilityRequestedPayload { name: Some(name), .. }))), ..}
if *name == "foo_svc".to_string()
);
}
/// a
/// \
/// b
/// / \
/// [c] [d]
/// a: offers service /svc/hippo to b
/// b: offers service /svc/hippo to collection, creates [c]
/// [c]: instance in collection uses directory /data/hippo
/// [d]: ditto, but with service /svc/hippo
#[fuchsia::test]
async fn use_in_collection() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.directory(DirectoryDeclBuilder::new("foo_data").build())
.protocol(ProtocolDeclBuilder::new("foo_svc").build())
.offer(OfferDecl::Directory(OfferDirectoryDecl {
source_name: "foo_data".into(),
source: OfferSource::Self_,
target_name: "hippo_data".into(),
target: OfferTarget::Child("b".to_string()),
rights: Some(*rights::READ_RIGHTS),
subdir: None,
dependency_type: DependencyType::Strong,
}))
.offer(OfferDecl::Protocol(OfferProtocolDecl {
source_name: "foo_svc".into(),
source: OfferSource::Self_,
target_name: "hippo_svc".into(),
target: OfferTarget::Child("b".to_string()),
dependency_type: DependencyType::Strong,
}))
.add_lazy_child("b")
.build(),
),
(
"b",
ComponentDeclBuilder::new()
.use_(UseDecl::Protocol(UseProtocolDecl {
source: UseSource::Framework,
source_name: "fuchsia.sys2.Realm".into(),
target_path: CapabilityPath::try_from("/svc/fuchsia.sys2.Realm").unwrap(),
dependency_type: DependencyType::Strong,
}))
.offer(OfferDecl::Directory(OfferDirectoryDecl {
source_name: "hippo_data".into(),
source: OfferSource::Parent,
target_name: "hippo_data".into(),
target: OfferTarget::Collection("coll".to_string()),
rights: Some(*rights::READ_RIGHTS),
subdir: None,
dependency_type: DependencyType::Strong,
}))
.offer(OfferDecl::Protocol(OfferProtocolDecl {
source_name: "hippo_svc".into(),
source: OfferSource::Parent,
target_name: "hippo_svc".into(),
target: OfferTarget::Collection("coll".to_string()),
dependency_type: DependencyType::Strong,
}))
.add_transient_collection("coll")
.build(),
),
(
"c",
ComponentDeclBuilder::new()
.use_(UseDecl::Directory(UseDirectoryDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Parent,
source_name: "hippo_data".into(),
target_path: CapabilityPath::try_from("/data/hippo").unwrap(),
rights: *rights::READ_RIGHTS,
subdir: None,
}))
.build(),
),
(
"d",
ComponentDeclBuilder::new()
.use_(UseDecl::Protocol(UseProtocolDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Parent,
source_name: "hippo_svc".into(),
target_path: CapabilityPath::try_from("/svc/hippo").unwrap(),
}))
.build(),
),
];
let test = RoutingTest::new("a", components).await;
test.create_dynamic_child(
vec!["b:0"].into(),
"coll",
ChildDecl {
name: "c".to_string(),
url: "test:///c".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
},
)
.await;
test.create_dynamic_child(
vec!["b:0"].into(),
"coll",
ChildDecl {
name: "d".to_string(),
url: "test:///d".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
},
)
.await;
test.check_use(vec!["b:0", "coll:c:1"].into(), CheckUse::default_directory(ExpectedResult::Ok))
.await;
test.check_use(
vec!["b:0", "coll:d:2"].into(),
CheckUse::Protocol { path: default_service_capability(), expected_res: ExpectedResult::Ok },
)
.await;
}
/// a
/// \
/// b
/// \
/// [c]
/// a: offers service /svc/hippo to b
/// b: creates [c]
/// [c]: tries to use /svc/hippo, but can't because service was not offered to its collection
#[fuchsia::test]
async fn use_in_collection_not_offered() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.directory(DirectoryDeclBuilder::new("foo_data").build())
.protocol(ProtocolDeclBuilder::new("foo_svc").build())
.offer(OfferDecl::Directory(OfferDirectoryDecl {
source_name: "foo_data".into(),
source: OfferSource::Self_,
target_name: "hippo_data".into(),
target: OfferTarget::Child("b".to_string()),
rights: Some(*rights::READ_RIGHTS),
subdir: None,
dependency_type: DependencyType::Strong,
}))
.offer(OfferDecl::Protocol(OfferProtocolDecl {
source_name: "foo_svc".into(),
source: OfferSource::Self_,
target_name: "hippo_svc".into(),
target: OfferTarget::Child("b".to_string()),
dependency_type: DependencyType::Strong,
}))
.add_lazy_child("b")
.build(),
),
(
"b",
ComponentDeclBuilder::new()
.use_(UseDecl::Protocol(UseProtocolDecl {
source: UseSource::Framework,
source_name: "fuchsia.sys2.Realm".into(),
target_path: CapabilityPath::try_from("/svc/fuchsia.sys2.Realm").unwrap(),
dependency_type: DependencyType::Strong,
}))
.add_transient_collection("coll")
.build(),
),
(
"c",
ComponentDeclBuilder::new()
.use_(UseDecl::Directory(UseDirectoryDecl {
source: UseSource::Parent,
source_name: "hippo_data".into(),
target_path: CapabilityPath::try_from("/data/hippo").unwrap(),
rights: *rights::READ_RIGHTS,
subdir: None,
dependency_type: DependencyType::Strong,
}))
.use_(UseDecl::Protocol(UseProtocolDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Parent,
source_name: "hippo_svc".into(),
target_path: CapabilityPath::try_from("/svc/hippo").unwrap(),
}))
.build(),
),
];
let test = RoutingTest::new("a", components).await;
test.create_dynamic_child(
vec!["b:0"].into(),
"coll",
ChildDecl {
name: "c".to_string(),
url: "test:///c".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
},
)
.await;
test.check_use(
vec!["b:0", "coll:c:1"].into(),
CheckUse::default_directory(ExpectedResult::Err(zx::Status::UNAVAILABLE)),
)
.await;
test.check_use(
vec!["b:0", "coll:c:1"].into(),
CheckUse::Protocol {
path: default_service_capability(),
expected_res: ExpectedResult::Err(zx::Status::UNAVAILABLE),
},
)
.await;
}
#[fuchsia::test]
async fn destroying_instance_kills_framework_service_task() {
let components = vec![
("a", ComponentDeclBuilder::new().add_lazy_child("b").build()),
(
"b",
ComponentDeclBuilder::new()
.use_(UseDecl::Protocol(UseProtocolDecl {
source: UseSource::Framework,
source_name: "fuchsia.sys2.Realm".into(),
target_path: CapabilityPath::try_from("/svc/fuchsia.sys2.Realm").unwrap(),
dependency_type: DependencyType::Strong,
}))
.build(),
),
];
let test = RoutingTest::new("a", components).await;
// Connect to `Realm`, which is a framework service.
let namespace = test.bind_and_get_namespace(vec!["b:0"].into()).await;
let proxy = capability_util::connect_to_svc_in_namespace::<fsys::RealmMarker>(
&namespace,
&"/svc/fuchsia.sys2.Realm".try_into().unwrap(),
)
.await;
// Destroy `b`. This should cause the task hosted for `Realm` to be cancelled.
let root = test.model.look_up(&vec![].into()).await.unwrap();
ActionSet::register(root.clone(), DestroyChildAction::new("b".into()))
.await
.expect("destroy failed");
ActionSet::register(root.clone(), PurgeChildAction::new("b:0".into()))
.await
.expect("destroy failed");
let mut event_stream = proxy.take_event_stream();
assert_matches!(event_stream.next().await, None);
}
/// a
/// \
/// b
///
/// a: declares runner "elf" with service "/svc/runner" from "self".
/// a: registers runner "elf" from self in environment as "hobbit".
/// b: uses runner "hobbit".
#[fuchsia::test]
async fn use_runner_from_parent_environment() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.add_child(ChildDeclBuilder::new_lazy_child("b").environment("env").build())
.add_environment(
EnvironmentDeclBuilder::new()
.name("env")
.extends(fsys::EnvironmentExtends::Realm)
.add_runner(RunnerRegistration {
source_name: "elf".into(),
source: RegistrationSource::Self_,
target_name: "hobbit".into(),
})
.build(),
)
.runner(RunnerDecl {
name: "elf".into(),
source_path: CapabilityPath::try_from("/svc/runner").unwrap(),
})
.build(),
),
("b", ComponentDeclBuilder::new_empty_component().add_program("hobbit").build()),
];
// Set up the system.
let (runner_service, mut receiver) =
create_service_directory_entry::<fcrunner::ComponentRunnerMarker>();
let universe = RoutingTestBuilder::new("a", components)
// Component "b" exposes a runner service.
.add_outgoing_path("a", CapabilityPath::try_from("/svc/runner").unwrap(), runner_service)
.build()
.await;
join!(
// Bind "b:0". We expect to see a call to our runner service for the new component.
async move {
universe.bind_instance(&vec!["b:0"].into()).await.unwrap();
},
// Wait for a request, and ensure it has the correct URL.
async move {
assert_eq!(
wait_for_runner_request(&mut receiver).await.resolved_url,
Some("test:///b_resolved".to_string())
);
}
);
}
/// a
/// \
/// [b]
///
/// a: declares runner "elf" with service "/svc/runner" from "self".
/// a: registers runner "elf" from self in environment as "hobbit".
/// b: instance in collection uses runner "hobbit".
#[fuchsia::test]
async fn use_runner_from_environment_in_collection() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.add_collection(
CollectionDeclBuilder::new_transient_collection("coll")
.environment("env")
.build(),
)
.add_environment(
EnvironmentDeclBuilder::new()
.name("env")
.extends(fsys::EnvironmentExtends::Realm)
.add_runner(RunnerRegistration {
source_name: "elf".into(),
source: RegistrationSource::Self_,
target_name: "hobbit".into(),
})
.build(),
)
.use_(UseDecl::Protocol(UseProtocolDecl {
source: UseSource::Framework,
source_name: "fuchsia.sys2.Realm".into(),
target_path: CapabilityPath::try_from("/svc/fuchsia.sys2.Realm").unwrap(),
dependency_type: DependencyType::Strong,
}))
.runner(RunnerDecl {
name: "elf".into(),
source_path: CapabilityPath::try_from("/svc/runner").unwrap(),
})
.build(),
),
("b", ComponentDeclBuilder::new_empty_component().add_program("hobbit").build()),
];
// Set up the system.
let (runner_service, mut receiver) =
create_service_directory_entry::<fcrunner::ComponentRunnerMarker>();
let universe = RoutingTestBuilder::new("a", components)
// Component "a" exposes a runner service.
.add_outgoing_path("a", CapabilityPath::try_from("/svc/runner").unwrap(), runner_service)
.build()
.await;
universe
.create_dynamic_child(
AbsoluteMoniker::root(),
"coll",
ChildDecl {
name: "b".to_string(),
url: "test:///b".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
},
)
.await;
join!(
// Bind "coll:b:1". We expect to see a call to our runner service for the new component.
async move {
universe.bind_instance(&vec!["coll:b:1"].into()).await.unwrap();
},
// Wait for a request, and ensure it has the correct URL.
async move {
assert_eq!(
wait_for_runner_request(&mut receiver).await.resolved_url,
Some("test:///b_resolved".to_string())
);
}
);
}
/// a
/// \
/// b
/// \
/// c
///
/// a: declares runner "elf" as service "/svc/runner" from self.
/// a: offers runner "elf" from self to "b" as "dwarf".
/// b: registers runner "dwarf" from realm in environment as "hobbit".
/// c: uses runner "hobbit".
#[fuchsia::test]
async fn use_runner_from_grandparent_environment() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.add_lazy_child("b")
.offer(OfferDecl::Runner(OfferRunnerDecl {
source: OfferSource::Self_,
source_name: CapabilityName("elf".to_string()),
target: OfferTarget::Child("b".to_string()),
target_name: CapabilityName("dwarf".to_string()),
}))
.runner(RunnerDecl {
name: "elf".into(),
source_path: CapabilityPath::try_from("/svc/runner").unwrap(),
})
.build(),
),
(
"b",
ComponentDeclBuilder::new()
.add_child(ChildDeclBuilder::new_lazy_child("c").environment("env").build())
.add_environment(
EnvironmentDeclBuilder::new()
.name("env")
.extends(fsys::EnvironmentExtends::Realm)
.add_runner(RunnerRegistration {
source_name: "dwarf".into(),
source: RegistrationSource::Parent,
target_name: "hobbit".into(),
})
.build(),
)
.build(),
),
("c", ComponentDeclBuilder::new_empty_component().add_program("hobbit").build()),
];
// Set up the system.
let (runner_service, mut receiver) =
create_service_directory_entry::<fcrunner::ComponentRunnerMarker>();
let universe = RoutingTestBuilder::new("a", components)
// Component "a" exposes a runner service.
.add_outgoing_path("a", CapabilityPath::try_from("/svc/runner").unwrap(), runner_service)
.build()
.await;
join!(
// Bind "c:0". We expect to see a call to our runner service for the new component.
async move {
universe.bind_instance(&vec!["b:0", "c:0"].into()).await.unwrap();
},
// Wait for a request, and ensure it has the correct URL.
async move {
assert_eq!(
wait_for_runner_request(&mut receiver).await.resolved_url,
Some("test:///c_resolved".to_string())
);
}
);
}
/// a
/// / \
/// b c
///
/// a: registers runner "dwarf" from "b" in environment as "hobbit".
/// b: exposes runner "elf" as service "/svc/runner" from self as "dwarf".
/// c: uses runner "hobbit".
#[fuchsia::test]
async fn use_runner_from_sibling_environment() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.add_lazy_child("b")
.add_child(ChildDeclBuilder::new_lazy_child("c").environment("env").build())
.add_environment(
EnvironmentDeclBuilder::new()
.name("env")
.extends(fsys::EnvironmentExtends::Realm)
.add_runner(RunnerRegistration {
source_name: "dwarf".into(),
source: RegistrationSource::Child("b".into()),
target_name: "hobbit".into(),
})
.build(),
)
.build(),
),
(
"b",
ComponentDeclBuilder::new()
.expose(ExposeDecl::Runner(ExposeRunnerDecl {
source: ExposeSource::Self_,
source_name: CapabilityName("elf".to_string()),
target: ExposeTarget::Parent,
target_name: CapabilityName("dwarf".to_string()),
}))
.runner(RunnerDecl {
name: "elf".into(),
source_path: CapabilityPath::try_from("/svc/runner").unwrap(),
})
.build(),
),
("c", ComponentDeclBuilder::new_empty_component().add_program("hobbit").build()),
];
// Set up the system.
let (runner_service, mut receiver) =
create_service_directory_entry::<fcrunner::ComponentRunnerMarker>();
let universe = RoutingTestBuilder::new("a", components)
// Component "a" exposes a runner service.
.add_outgoing_path("b", CapabilityPath::try_from("/svc/runner").unwrap(), runner_service)
.build()
.await;
join!(
// Bind "c:0". We expect to see a call to our runner service for the new component.
async move {
universe.bind_instance(&vec!["c:0"].into()).await.unwrap();
},
// Wait for a request, and ensure it has the correct URL.
async move {
assert_eq!(
wait_for_runner_request(&mut receiver).await.resolved_url,
Some("test:///c_resolved".to_string())
);
}
);
}
/// a
/// \
/// b
/// \
/// c
///
/// a: declares runner "elf" as service "/svc/runner" from self.
/// a: registers runner "elf" from realm in environment as "hobbit".
/// b: creates environment extending from realm.
/// c: uses runner "hobbit".
#[fuchsia::test]
async fn use_runner_from_inherited_environment() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.add_child(ChildDeclBuilder::new_lazy_child("b").environment("env").build())
.add_environment(
EnvironmentDeclBuilder::new()
.name("env")
.extends(fsys::EnvironmentExtends::Realm)
.add_runner(RunnerRegistration {
source_name: "elf".into(),
source: RegistrationSource::Self_,
target_name: "hobbit".into(),
})
.build(),
)
.runner(RunnerDecl {
name: "elf".into(),
source_path: CapabilityPath::try_from("/svc/runner").unwrap(),
})
.build(),
),
(
"b",
ComponentDeclBuilder::new()
.add_child(ChildDeclBuilder::new_lazy_child("c").environment("env").build())
.add_environment(
EnvironmentDeclBuilder::new()
.name("env")
.extends(fsys::EnvironmentExtends::Realm)
.build(),
)
.build(),
),
("c", ComponentDeclBuilder::new_empty_component().add_program("hobbit").build()),
];
// Set up the system.
let (runner_service, mut receiver) =
create_service_directory_entry::<fcrunner::ComponentRunnerMarker>();
let universe = RoutingTestBuilder::new("a", components)
// Component "a" exposes a runner service.
.add_outgoing_path("a", CapabilityPath::try_from("/svc/runner").unwrap(), runner_service)
.build()
.await;
join!(
// Bind "c:0". We expect to see a call to our runner service for the new component.
async move {
universe.bind_instance(&vec!["b:0", "c:0"].into()).await.unwrap();
},
// Wait for a request, and ensure it has the correct URL.
async move {
assert_eq!(
wait_for_runner_request(&mut receiver).await.resolved_url,
Some("test:///c_resolved".to_string())
);
}
);
}
/// a
/// \
/// b
///
/// a: declares runner "elf" with service "/svc/runner" from "self".
/// a: registers runner "elf" from self in environment as "dwarf".
/// b: uses runner "hobbit". Fails because "hobbit" was not in environment.
#[fuchsia::test]
async fn use_runner_from_environment_not_found() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.add_child(ChildDeclBuilder::new_lazy_child("b").environment("env").build())
.add_environment(
EnvironmentDeclBuilder::new()
.name("env")
.extends(fsys::EnvironmentExtends::Realm)
.add_runner(RunnerRegistration {
source_name: "elf".into(),
source: RegistrationSource::Self_,
target_name: "dwarf".into(),
})
.build(),
)
.runner(RunnerDecl {
name: "elf".into(),
source_path: CapabilityPath::try_from("/svc/runner").unwrap(),
})
.build(),
),
("b", ComponentDeclBuilder::new_empty_component().add_program("hobbit").build()),
];
// Set up the system.
let (runner_service, _receiver) =
create_service_directory_entry::<fcrunner::ComponentRunnerMarker>();
let universe = RoutingTestBuilder::new("a", components)
// Component "a" exposes a runner service.
.add_outgoing_path("a", CapabilityPath::try_from("/svc/runner").unwrap(), runner_service)
.build()
.await;
// Bind "b:0". We expect it to fail because routing failed.
assert_matches!(
universe.bind_instance(&vec!["b:0"].into()).await,
Err(ModelError::RoutingError {
err: RoutingError::UseFromEnvironmentNotFound {
moniker,
capability_type,
capability_name,
}
})
if moniker == AbsoluteMoniker::from(vec!["b:0"]) &&
capability_type == "runner" &&
capability_name == CapabilityName("hobbit".to_string()));
}
// TODO: Write a test for environment that extends from None. Currently, this is not
// straightforward because resolver routing is not implemented yet, which makes it impossible to
// register a new resolver and have it be usable.
/// a
/// \
/// [b]
/// \
/// c
///
/// a: offers service /svc/foo from self
/// [b]: offers service /svc/foo to c
/// [b]: is destroyed
/// c: uses service /svc/foo, which should fail
#[fuchsia::test]
async fn use_with_destroyed_parent() {
let use_protocol_decl = UseProtocolDecl {
source: UseSource::Parent,
source_name: "foo_svc".into(),
target_path: CapabilityPath::try_from("/svc/hippo").unwrap(),
dependency_type: DependencyType::Strong,
};
let use_decl = UseDecl::Protocol(use_protocol_decl.clone());
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.protocol(ProtocolDeclBuilder::new("foo_svc").build())
.use_(UseDecl::Protocol(UseProtocolDecl {
source: UseSource::Framework,
source_name: "fuchsia.sys2.Realm".into(),
target_path: CapabilityPath::try_from("/svc/fuchsia.sys2.Realm").unwrap(),
dependency_type: DependencyType::Strong,
}))
.offer(OfferDecl::Protocol(OfferProtocolDecl {
source: OfferSource::Self_,
source_name: "foo_svc".into(),
target_name: "foo_svc".into(),
target: OfferTarget::Collection("coll".to_string()),
dependency_type: DependencyType::Strong,
}))
.add_transient_collection("coll")
.build(),
),
(
"b",
ComponentDeclBuilder::new()
.offer(OfferDecl::Protocol(OfferProtocolDecl {
source: OfferSource::Parent,
source_name: "foo_svc".into(),
target_name: "foo_svc".into(),
target: OfferTarget::Child("c".to_string()),
dependency_type: DependencyType::Strong,
}))
.add_lazy_child("c")
.build(),
),
("c", ComponentDeclBuilder::new().use_(use_decl.clone()).build()),
];
let test = RoutingTest::new("a", components).await;
test.create_dynamic_child(
vec![].into(),
"coll",
ChildDecl {
name: "b".to_string(),
url: "test:///b".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
},
)
.await;
// Confirm we can use service from "c".
test.check_use(
vec!["coll:b:1", "c:0"].into(),
CheckUse::Protocol { path: default_service_capability(), expected_res: ExpectedResult::Ok },
)
.await;
// Destroy "b", but preserve a reference to "c" so we can route from it below.
let moniker = vec!["coll:b:1", "c:0"].into();
    let realm_c = test.model.look_up(&moniker).await.expect("failed to look up realm c");
test.destroy_dynamic_child(vec![].into(), "coll", "b").await;
// Now attempt to route the service from "c". Should fail because "b" does not exist so we
// cannot follow it.
let err = route_capability(RouteRequest::UseProtocol(use_protocol_decl), &realm_c)
.await
.expect_err("routing unexpectedly succeeded");
assert_matches!(
err,
RoutingError::ComponentInstanceError(
ComponentInstanceError::InstanceNotFound { moniker }
) if moniker == vec!["coll:b:1"].into()
);
}
/// a
/// / \
/// b c
///
/// b: exposes service /svc/foo from self as /svc/bar
/// a: offers service /svc/bar from b as /svc/baz to c, which was destroyed (but not removed
/// from the tree yet)
/// c: uses /svc/baz as /svc/hippo
#[fuchsia::test]
async fn use_from_destroyed_but_not_removed() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.offer(OfferDecl::Protocol(OfferProtocolDecl {
source: OfferSource::Child("b".to_string()),
source_name: "bar_svc".into(),
target_name: "baz_svc".into(),
target: OfferTarget::Child("c".to_string()),
dependency_type: DependencyType::Strong,
}))
.add_lazy_child("b")
.add_lazy_child("c")
.build(),
),
(
"b",
ComponentDeclBuilder::new()
.directory(DirectoryDeclBuilder::new("foo_data").build())
.protocol(ProtocolDeclBuilder::new("foo_svc").build())
.expose(ExposeDecl::Protocol(ExposeProtocolDecl {
source: ExposeSource::Self_,
source_name: "foo_svc".into(),
target_name: "bar_svc".into(),
target: ExposeTarget::Parent,
}))
.build(),
),
(
"c",
ComponentDeclBuilder::new()
.use_(UseDecl::Protocol(UseProtocolDecl {
source: UseSource::Parent,
source_name: "baz_svc".into(),
target_path: CapabilityPath::try_from("/svc/hippo").unwrap(),
dependency_type: DependencyType::Strong,
}))
.build(),
),
];
let test = RoutingTest::new("a", components).await;
let component_b =
test.model.look_up(&vec!["b:0"].into()).await.expect("failed to look up realm b");
// Destroy `b` but keep alive its reference from the parent.
// TODO: If we had a "pre-destroy" event we could delete the child through normal means and
// block on the event instead of explicitly registering actions.
ActionSet::register(component_b.clone(), ShutdownAction::new()).await.expect("shutdown failed");
ActionSet::register(component_b, PurgeAction::new()).await.expect("destroy failed");
test.check_use(
vec!["c:0"].into(),
CheckUse::Protocol {
path: default_service_capability(),
expected_res: ExpectedResult::Err(zx::Status::UNAVAILABLE),
},
)
.await;
}
/// a
/// / \
/// b c
///
/// a: creates environment "env" and registers resolver "base" from c.
/// b: resolved by resolver "base" through "env".
/// c: exposes resolver "base" from self.
#[fuchsia::test]
async fn use_resolver_from_parent_environment() {
// Note that we do not define a component "b". This will be resolved by our custom resolver.
let components = vec![
(
"a",
ComponentDeclBuilder::new_empty_component()
.add_child(ChildDeclBuilder::new().name("b").url("base://b").environment("env"))
.add_child(ChildDeclBuilder::new_lazy_child("c"))
.add_environment(
EnvironmentDeclBuilder::new()
.name("env")
.extends(fsys::EnvironmentExtends::Realm)
.add_resolver(ResolverRegistration {
resolver: "base".into(),
source: RegistrationSource::Child("c".into()),
scheme: "base".into(),
}),
)
.build(),
),
(
"c",
ComponentDeclBuilder::new()
.expose(ExposeDecl::Resolver(ExposeResolverDecl {
source: ExposeSource::Self_,
source_name: "base".into(),
target: ExposeTarget::Parent,
target_name: "base".into(),
}))
.resolver(ResolverDecl {
name: "base".into(),
source_path: "/svc/fuchsia.sys2.ComponentResolver".parse().unwrap(),
})
.build(),
),
];
// Set up the system.
let (resolver_service, mut receiver) =
create_service_directory_entry::<fsys::ComponentResolverMarker>();
let universe = RoutingTestBuilder::new("a", components)
// Component "c" exposes a resolver service.
.add_outgoing_path(
"c",
CapabilityPath::try_from("/svc/fuchsia.sys2.ComponentResolver").unwrap(),
resolver_service,
)
.build()
.await;
join!(
// Bind "b:0". We expect to see a call to our resolver service for the new component.
async move {
universe
.bind_instance(&vec!["b:0"].into())
.await
.expect("failed to bind to instance b:0");
},
// Wait for a request, and resolve it.
async {
while let Some(fsys::ComponentResolverRequest::Resolve { component_url, responder }) =
receiver.next().await
{
assert_eq!(component_url, "base://b");
responder
.send(&mut Ok(fsys::Component {
resolved_url: Some("test://b".into()),
decl: Some(fmem::Data::Bytes(
fidl::encoding::encode_persistent(
&mut default_component_decl().native_into_fidl(),
)
.unwrap(),
)),
package: None,
..fsys::Component::EMPTY
}))
.expect("failed to send resolve response");
}
}
);
}
/// a
/// \
/// b
/// \
/// c
/// a: creates environment "env" and registers resolver "base" from self.
/// b: has environment "env".
/// c: is resolved by resolver from grandparent.
#[fuchsia::test]
async fn use_resolver_from_grandparent_environment() {
// Note that we do not define a component "c". This will be resolved by our custom resolver.
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.add_child(ChildDeclBuilder::new_lazy_child("b").environment("env"))
.add_environment(
EnvironmentDeclBuilder::new()
.name("env")
.extends(fsys::EnvironmentExtends::Realm)
.add_resolver(ResolverRegistration {
resolver: "base".into(),
source: RegistrationSource::Self_,
scheme: "base".into(),
}),
)
.resolver(ResolverDecl {
name: "base".into(),
source_path: "/svc/fuchsia.sys2.ComponentResolver".parse().unwrap(),
})
.build(),
),
(
"b",
ComponentDeclBuilder::new_empty_component()
.add_child(ChildDeclBuilder::new().name("c").url("base://c"))
.build(),
),
];
|
// Set up the system.
let (resolver_service, mut receiver) =
create_service_directory_entry::<fsys::ComponentResolverMarker>();
let universe = RoutingTestBuilder::new("a", components)
// Component "c" exposes a resolver service.
.add_outgoing_path(
"a",
CapabilityPath::try_from("/svc/fuchsia.sys2.ComponentResolver").unwrap(),
resolver_service,
)
.build()
.await;
join!(
// Bind "c:0". We expect to see a call to our resolver service for the new component.
async move {
universe
.bind_instance(&vec!["b:0", "c:0"].into())
.await
.expect("failed to bind to instance c:0");
},
// Wait for a request, and resolve it.
async {
while let Some(fsys::ComponentResolverRequest::Resolve { component_url, responder }) =
receiver.next().await
{
assert_eq!(component_url, "base://c");
responder
.send(&mut Ok(fsys::Component {
resolved_url: Some("test://c".into()),
decl: Some(fmem::Data::Bytes(
fidl::encoding::encode_persistent(
&mut default_component_decl().native_into_fidl(),
)
.unwrap(),
)),
package: None,
..fsys::Component::EMPTY
}))
.expect("failed to send resolve response");
}
}
);
}
/// a
/// / \
/// b c
/// a: creates environment "env" and registers resolver "base" from self.
/// b: has environment "env".
/// c: does NOT have environment "env".
#[fuchsia::test]
async fn resolver_is_not_available() {
// Note that we do not define a component "b" or "c". This will be resolved by our custom resolver.
let components = vec![(
"a",
ComponentDeclBuilder::new()
.add_child(ChildDeclBuilder::new().name("b").url("base://b").environment("env"))
.add_child(ChildDeclBuilder::new().name("c").url("base://c"))
.add_environment(
EnvironmentDeclBuilder::new()
.name("env")
.extends(fsys::EnvironmentExtends::Realm)
.add_resolver(ResolverRegistration {
resolver: "base".into(),
source: RegistrationSource::Self_,
scheme: "base".into(),
}),
)
.resolver(ResolverDecl {
name: "base".into(),
source_path: "/svc/fuchsia.sys2.ComponentResolver".parse().unwrap(),
})
.build(),
)];
// Set up the system.
let (resolver_service, mut receiver) =
create_service_directory_entry::<fsys::ComponentResolverMarker>();
let universe = RoutingTestBuilder::new("a", components)
// Component "c" exposes a resolver service.
.add_outgoing_path(
"a",
CapabilityPath::try_from("/svc/fuchsia.sys2.ComponentResolver").unwrap(),
resolver_service,
)
.build()
.await;
join!(
// Bind "c:0". We expect to see a failure that the scheme is not registered.
async move {
match universe.bind_instance(&vec!["c:0"].into()).await {
Err(ModelError::ComponentInstanceError {
err: ComponentInstanceError::ResolveFailed { err: resolve_error, .. },
}) => {
assert_eq!(
resolve_error.to_string(),
"failed to resolve \"base://c\": scheme not registered"
);
}
_ => {
panic!("expected ModelError wrapping ComponentInstanceError::ResolveFailed");
}
};
},
// Wait for a request, and resolve it.
async {
while let Some(fsys::ComponentResolverRequest::Resolve { component_url, responder }) =
receiver.next().await
{
assert_eq!(component_url, "base://b");
responder
.send(&mut Ok(fsys::Component {
resolved_url: Some("test://b".into()),
decl: Some(fmem::Data::Bytes(
fidl::encoding::encode_persistent(
&mut default_component_decl().native_into_fidl(),
)
.unwrap(),
)),
package: None,
..fsys::Component::EMPTY
}))
.expect("failed to send resolve response");
}
}
);
}
/// a
/// /
/// b
/// a: creates environment "env" and registers resolver "base" from self.
/// b: has environment "env".
#[fuchsia::test]
async fn resolver_component_decl_is_validated() {
// Note that we do not define a component "b". This will be resolved by our custom resolver.
let components = vec![(
"a",
ComponentDeclBuilder::new()
.add_child(ChildDeclBuilder::new().name("b").url("base://b").environment("env"))
.add_environment(
EnvironmentDeclBuilder::new()
.name("env")
.extends(fsys::EnvironmentExtends::Realm)
.add_resolver(ResolverRegistration {
resolver: "base".into(),
source: RegistrationSource::Self_,
scheme: "base".into(),
}),
)
.resolver(ResolverDecl {
name: "base".into(),
source_path: "/svc/fuchsia.sys2.ComponentResolver".parse().unwrap(),
})
.build(),
)];
// Set up the system.
let (resolver_service, mut receiver) =
create_service_directory_entry::<fsys::ComponentResolverMarker>();
let universe = RoutingTestBuilder::new("a", components)
// Component "a" exposes a resolver service.
.add_outgoing_path(
"a",
CapabilityPath::try_from("/svc/fuchsia.sys2.ComponentResolver").unwrap(),
resolver_service,
)
.build()
.await;
join!(
// Bind "b:0". We expect to see a ResolverError.
async move {
match universe.bind_instance(&vec!["b:0"].into()).await {
Err(ModelError::ComponentInstanceError {
err: ComponentInstanceError::ResolveFailed { err: resolve_error, .. },
}) => {
assert!(resolve_error
.to_string()
.starts_with("failed to resolve \"base://b\": component manifest invalid"));
}
_ => {
panic!("expected ModelError wrapping ComponentInstanceError::ResolveFailed");
}
};
},
// Wait for a request, and resolve it.
async {
while let Some(fsys::ComponentResolverRequest::Resolve { component_url, responder }) =
receiver.next().await
{
assert_eq!(component_url, "base://b");
responder
.send(&mut Ok(fsys::Component {
resolved_url: Some("test://b".into()),
decl: Some(fmem::Data::Bytes({
let mut fidl = fsys::ComponentDecl {
exposes: Some(vec![fsys::ExposeDecl::Protocol(
fsys::ExposeProtocolDecl {
source: Some(fsys::Ref::Self_(fsys::SelfRef {})),
..fsys::ExposeProtocolDecl::EMPTY
},
)]),
..fsys::ComponentDecl::EMPTY
};
fidl::encoding::encode_persistent(&mut fidl).unwrap()
})),
package: None,
..fsys::Component::EMPTY
}))
.expect("failed to send resolve response");
}
}
);
}
/// a
/// \
/// b
///
/// b: uses framework events "started" and "capability_requested".
/// Capability policy denies the route for "started" but allows it for
/// "capability_requested".
#[fuchsia::test]
async fn use_event_from_framework_denied_by_capability_policy() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.offer(OfferDecl::Protocol(OfferProtocolDecl {
source: OfferSource::Parent,
source_name: "fuchsia.sys2.EventSource".try_into().unwrap(),
target_name: "fuchsia.sys2.EventSource".try_into().unwrap(),
target: OfferTarget::Child("b".to_string()),
dependency_type: DependencyType::Strong,
}))
.add_lazy_child("b")
.build(),
),
(
"b",
ComponentDeclBuilder::new()
.use_(UseDecl::Protocol(UseProtocolDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Parent,
source_name: "fuchsia.sys2.EventSource".try_into().unwrap(),
target_path: "/svc/fuchsia.sys2.EventSource".try_into().unwrap(),
}))
.use_(UseDecl::Event(UseEventDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Framework,
source_name: "capability_requested".into(),
target_name: "capability_requested".into(),
filter: None,
mode: cm_rust::EventMode::Async,
}))
.use_(UseDecl::Event(UseEventDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Framework,
source_name: "started".into(),
target_name: "started".into(),
filter: None,
mode: cm_rust::EventMode::Async,
}))
.use_(UseDecl::Event(UseEventDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Framework,
source_name: "resolved".into(),
target_name: "resolved".into(),
filter: None,
mode: cm_rust::EventMode::Sync,
}))
.use_(UseDecl::EventStream(UseEventStreamDecl {
name: CapabilityName::try_from("StartComponentTree").unwrap(),
subscriptions: vec![cm_rust::EventSubscription {
event_name: "resolved".into(),
mode: cm_rust::EventMode::Sync,
}],
}))
.build(),
),
];
let mut allowlist = HashSet::new();
allowlist.insert(AllowlistEntry::Exact(AbsoluteMoniker::from(vec!["b:0"])));
let test = RoutingTestBuilder::new("a", components)
.add_capability_policy(
CapabilityAllowlistKey {
source_moniker: ExtendedMoniker::ComponentInstance(AbsoluteMoniker::from(vec![
"b:0",
])),
source_name: CapabilityName::from("started"),
source: CapabilityAllowlistSource::Framework,
capability: CapabilityTypeName::Event,
},
HashSet::new(),
)
.add_capability_policy(
CapabilityAllowlistKey {
source_moniker: ExtendedMoniker::ComponentInstance(AbsoluteMoniker::from(vec![
"b:0",
])),
source_name: CapabilityName::from("capability_requested"),
source: CapabilityAllowlistSource::Framework,
capability: CapabilityTypeName::Event,
},
allowlist,
)
.build()
.await;
test.check_use(
vec!["b:0"].into(),
CheckUse::Event {
requests: vec![EventSubscription::new("capability_requested".into(), EventMode::Async)],
expected_res: ExpectedResult::Ok,
},
)
.await;
test.check_use(
vec!["b:0"].into(),
CheckUse::Event {
requests: vec![EventSubscription::new("started".into(), EventMode::Async)],
expected_res: ExpectedResult::Err(zx::Status::ACCESS_DENIED),
},
)
.await
}
// a
// \
// b
//
// a: exposes "foo" to parent from child
// b: exposes "foo" to parent from self
#[fuchsia::test]
async fn route_protocol_from_expose() {
let expose_decl = ExposeProtocolDecl {
source: ExposeSource::Child("b".into()),
source_name: "foo".into(),
target_name: "foo".into(),
target: ExposeTarget::Parent,
};
let expected_protocol_decl =
ProtocolDecl { name: "foo".into(), source_path: "/svc/foo".parse().unwrap() };
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.expose(ExposeDecl::Protocol(expose_decl.clone()))
.add_lazy_child("b")
.build(),
),
(
"b",
ComponentDeclBuilder::new()
.expose(ExposeDecl::Protocol(ExposeProtocolDecl {
source: ExposeSource::Self_,
source_name: "foo".into(),
target_name: "foo".into(),
target: ExposeTarget::Parent,
}))
.protocol(expected_protocol_decl.clone())
.build(),
),
];
let test = RoutingTestBuilder::new("a", components).build().await;
let root_instance = test.model.look_up(&AbsoluteMoniker::root()).await.expect("root instance");
let expected_source_moniker = AbsoluteMoniker::parse_string_without_instances("/b").unwrap();
assert_matches!(
route_capability(RouteRequest::ExposeProtocol(expose_decl), &root_instance).await,
Ok(RouteSource::Protocol(
CapabilitySource::Component {
capability: ComponentCapability::Protocol(protocol_decl),
component,
})
) if protocol_decl == expected_protocol_decl && component.moniker == expected_source_moniker
);
}
/// a
/// /
/// b
///
/// a: offer to b from self
/// b: use from parent
#[fuchsia::test]
async fn use_service_from_parent() {
let use_decl = UseServiceDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Parent,
source_name: "foo".into(),
target_path: CapabilityPath::try_from("/foo").unwrap(),
};
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.offer(OfferDecl::Service(OfferServiceDecl {
source: OfferSource::Self_,
source_name: "foo".into(),
target_name: "foo".into(),
target: OfferTarget::Child("b".to_string()),
}))
.service(ServiceDecl {
name: "foo".into(),
source_path: "/svc/foo".try_into().unwrap(),
})
.add_lazy_child("b")
.build(),
),
("b", ComponentDeclBuilder::new().use_(use_decl.clone().into()).build()),
];
let test = RoutingTestBuilder::new("a", components).build().await;
let b_component = test.model.look_up(&vec!["b:0"].into()).await.expect("b instance");
let a_component = test.model.look_up(&AbsoluteMoniker::root()).await.expect("root instance");
let source = route_capability(RouteRequest::UseService(use_decl), &b_component)
.await
.expect("failed to route service");
match source {
RouteSource::Service(CapabilitySource::Component {
capability: ComponentCapability::Service(ServiceDecl { name, source_path }),
component,
}) => {
assert_eq!(name, CapabilityName("foo".into()));
assert_eq!(source_path, "/svc/foo".parse::<CapabilityPath>().unwrap());
assert!(Arc::ptr_eq(&component.upgrade().unwrap(), &a_component));
}
_ => panic!("bad capability source"),
};
}
/// a
/// /
/// b
///
/// a: use from #b
/// b: expose to parent from self
#[fuchsia::test]
async fn use_service_from_child() {
let use_decl = UseServiceDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Child("b".to_string()),
source_name: "foo".into(),
target_path: CapabilityPath::try_from("/foo").unwrap(),
};
let components = vec![
(
"a",
ComponentDeclBuilder::new().use_(use_decl.clone().into()).add_lazy_child("b").build(),
),
(
"b",
ComponentDeclBuilder::new()
.service(ServiceDecl {
name: "foo".into(),
source_path: "/svc/foo".try_into().unwrap(),
})
.expose(ExposeDecl::Service(ExposeServiceDecl {
source: ExposeSource::Self_,
source_name: "foo".into(),
target_name: "foo".into(),
target: ExposeTarget::Parent,
}))
.build(),
),
];
let test = RoutingTestBuilder::new("a", components).build().await;
let a_component = test.model.look_up(&AbsoluteMoniker::root()).await.expect("root instance");
let b_component = test.model.look_up(&vec!["b:0"].into()).await.expect("b instance");
let source = route_capability(RouteRequest::UseService(use_decl), &a_component)
.await
.expect("failed to route service");
match source {
RouteSource::Service(CapabilitySource::Component {
capability: ComponentCapability::Service(ServiceDecl { name, source_path }),
component,
}) => {
assert_eq!(name, CapabilityName("foo".into()));
assert_eq!(source_path, "/svc/foo".parse::<CapabilityPath>().unwrap());
assert!(Arc::ptr_eq(&component.upgrade().unwrap(), &b_component));
}
_ => panic!("bad capability source"),
};
}
/// a
/// / \
/// b c
///
/// a: offer to b from child c
/// b: use from parent
/// c: expose from self
#[fuchsia::test]
async fn route_service_from_sibling() {
let use_decl = UseServiceDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Parent,
source_name: "foo".into(),
target_path: CapabilityPath::try_from("/foo").unwrap(),
};
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.offer(OfferDecl::Service(OfferServiceDecl {
source: OfferSource::Child("c".into()),
source_name: "foo".into(),
target_name: "foo".into(),
target: OfferTarget::Child("b".to_string()),
}))
.add_lazy_child("b")
.add_lazy_child("c")
.build(),
),
("b", ComponentDeclBuilder::new().use_(use_decl.clone().into()).build()),
(
"c",
ComponentDeclBuilder::new()
.expose(ExposeDecl::Service(ExposeServiceDecl {
source: ExposeSource::Self_,
source_name: "foo".into(),
target_name: "foo".into(),
target: ExposeTarget::Parent,
}))
.service(ServiceDecl {
name: "foo".into(),
source_path: "/svc/foo".try_into().unwrap(),
})
.build(),
),
];
let test = RoutingTestBuilder::new("a", components).build().await;
let b_component = test.model.look_up(&vec!["b:0"].into()).await.expect("b instance");
let c_component = test.model.look_up(&vec!["c:0"].into()).await.expect("c instance");
let source = route_capability(RouteRequest::UseService(use_decl), &b_component)
.await
.expect("failed to route service");
// Verify this source comes from `c`.
match source {
RouteSource::Service(CapabilitySource::Component {
capability: ComponentCapability::Service(ServiceDecl { name, source_path }),
component,
}) => {
assert_eq!(name, CapabilityName("foo".into()));
assert_eq!(source_path, "/svc/foo".parse::<CapabilityPath>().unwrap());
assert!(Arc::ptr_eq(&component.upgrade().unwrap(), &c_component));
}
_ => panic!("bad capability source"),
};
}
#[fuchsia::test]
async fn route_service_from_parent_collection() {
let use_decl = UseServiceDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Parent,
source_name: "foo".into(),
target_path: CapabilityPath::try_from("/foo").unwrap(),
};
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.offer(OfferDecl::Service(OfferServiceDecl {
source: OfferSource::Collection("coll".to_string()),
source_name: "foo".into(),
target_name: "foo".into(),
target: OfferTarget::Child("b".to_string()),
}))
.add_collection(CollectionDeclBuilder::new_transient_collection("coll"))
.add_lazy_child("b")
.build(),
),
("b", ComponentDeclBuilder::new().use_(use_decl.clone().into()).build()),
];
let test = RoutingTestBuilder::new("a", components).build().await;
let b_component = test.model.look_up(&vec!["b:0"].into()).await.expect("b instance");
let a_component = test.model.look_up(&AbsoluteMoniker::root()).await.expect("root instance");
let source = route_capability(RouteRequest::UseService(use_decl), &b_component)
.await
.expect("failed to route service");
match source {
RouteSource::Service(CapabilitySource::Collection {
collection_name,
source_name,
component,
..
}) => {
assert_eq!(collection_name, "coll");
assert_eq!(source_name, CapabilityName("foo".into()));
assert!(Arc::ptr_eq(&component.upgrade().unwrap(), &a_component));
}
_ => panic!("bad capability source"),
};
}
#[fuchsia::test]
async fn list_service_instances_from_collection() {
let use_decl = UseServiceDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Parent,
source_name: "foo".into(),
target_path: CapabilityPath::try_from("/foo").unwrap(),
};
let components = vec![
(
"root",
ComponentDeclBuilder::new()
.use_(UseDecl::Protocol(UseProtocolDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Framework,
source_name: "fuchsia.sys2.Realm".into(),
target_path: CapabilityPath::try_from("/svc/fuchsia.sys2.Realm").unwrap(),
}))
.offer(OfferDecl::Service(OfferServiceDecl {
source: OfferSource::Collection("coll".to_string()),
source_name: "foo".into(),
target_name: "foo".into(),
target: OfferTarget::Child("client".to_string()),
}))
.add_collection(CollectionDeclBuilder::new_transient_collection("coll"))
.add_lazy_child("client")
.build(),
),
("client", ComponentDeclBuilder::new().use_(use_decl.clone().into()).build()),
(
"service_child_a",
ComponentDeclBuilder::new()
.expose(ExposeDecl::Service(ExposeServiceDecl {
source: ExposeSource::Self_,
source_name: "foo".into(),
target: ExposeTarget::Parent,
target_name: "foo".into(),
}))
.service(ServiceDecl {
name: "foo".into(),
source_path: "/svc/foo".try_into().unwrap(),
})
.build(),
),
(
"service_child_b",
ComponentDeclBuilder::new()
.expose(ExposeDecl::Service(ExposeServiceDecl {
source: ExposeSource::Self_,
source_name: "foo".into(),
target: ExposeTarget::Parent,
target_name: "foo".into(),
}))
.service(ServiceDecl {
name: "foo".into(),
source_path: "/svc/foo".try_into().unwrap(),
})
.build(),
),
("non_service_child", ComponentDeclBuilder::new().build()),
];
let test = RoutingTestBuilder::new("root", components).build().await;
// Start a few dynamic children in the collection "coll".
test.create_dynamic_child(
AbsoluteMoniker::root(),
"coll",
ChildDeclBuilder::new_lazy_child("service_child_a"),
)
.await;
test.create_dynamic_child(
AbsoluteMoniker::root(),
"coll",
ChildDeclBuilder::new_lazy_child("non_service_child"),
)
.await;
test.create_dynamic_child(
AbsoluteMoniker::root(),
"coll",
ChildDeclBuilder::new_lazy_child("service_child_b"),
)
.await;
let client_component =
test.model.look_up(&vec!["client:0"].into()).await.expect("client instance");
let source = route_capability(RouteRequest::UseService(use_decl), &client_component)
.await
.expect("failed to route service");
let capability_provider = match source {
RouteSource::Service(CapabilitySource::Collection { capability_provider, .. }) => {
capability_provider
}
_ => panic!("bad capability source"),
};
// Check that only the instances that expose the service are listed.
let instances: HashSet<String> =
capability_provider.list_instances().await.unwrap().into_iter().collect();
assert_eq!(instances.len(), 2);
assert!(instances.contains("service_child_a"));
assert!(instances.contains("service_child_b"));
// Try routing to one of the instances.
let source = capability_provider
.route_instance("service_child_a")
.await
.expect("failed to route to child");
match source {
CapabilitySource::Component {
capability: ComponentCapability::Service(ServiceDecl { name, source_path }),
component,
} => {
assert_eq!(name, CapabilityName("foo".into()));
assert_eq!(source_path, "/svc/foo".parse::<CapabilityPath>().unwrap());
assert_eq!(component.moniker, vec!["coll:service_child_a:1"].into());
}
_ => panic!("bad child capability source"),
}
}
/// a
/// / \
/// b c
///
/// a: offer service from c to b
/// b: use service
/// c: expose service from collection
#[fuchsia::test]
async fn use_service_from_sibling_collection() {
let components = vec![
(
"a",
ComponentDeclBuilder::new()
.offer(OfferDecl::Service(OfferServiceDecl {
source: OfferSource::Child("c".to_string()),
source_name: "my.service.Service".into(),
target: OfferTarget::Child("b".to_string()),
target_name: "my.service.Service".into(),
}))
.add_child(ChildDeclBuilder::new_lazy_child("b"))
.add_child(ChildDeclBuilder::new_lazy_child("c"))
.build(),
),
(
"b",
ComponentDeclBuilder::new()
.use_(UseDecl::Service(UseServiceDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Parent,
source_name: "my.service.Service".into(),
target_path: "/svc/my.service.Service".try_into().unwrap(),
}))
.build(),
),
(
"c",
ComponentDeclBuilder::new()
.use_(UseDecl::Protocol(UseProtocolDecl {
dependency_type: DependencyType::Strong,
source: UseSource::Framework,
source_name: "fuchsia.sys2.Realm".into(),
target_path: "/svc/fuchsia.sys2.Realm".try_into().unwrap(),
}))
.expose(ExposeDecl::Service(ExposeServiceDecl {
source: ExposeSource::Collection("coll".to_string()),
source_name: "my.service.Service".into(),
target_name: "my.service.Service".into(),
target: ExposeTarget::Parent,
}))
.add_collection(CollectionDeclBuilder::new_transient_collection("coll"))
.build(),
),
(
"foo",
ComponentDeclBuilder::new()
.expose(ExposeDecl::Service(ExposeServiceDecl {
source: ExposeSource::Self_,
source_name: "my.service.Service".into(),
target_name: "my.service.Service".into(),
target: ExposeTarget::Parent,
}))
.service(ServiceDecl {
name: "my.service.Service".into(),
source_path: "/svc/my.service.Service".try_into().unwrap(),
})
.build(),
),
(
"bar",
ComponentDeclBuilder::new()
.expose(ExposeDecl::Service(ExposeServiceDecl {
source: ExposeSource::Self_,
source_name: "my.service.Service".into(),
target_name: "my.service.Service".into(),
target: ExposeTarget::Parent,
}))
.service(ServiceDecl {
name: "my.service.Service".into(),
source_path: "/svc/my.service.Service".try_into().unwrap(),
})
.build(),
),
("baz", ComponentDeclBuilder::new().build()),
];
let (directory_entry, mut receiver) = create_service_directory_entry::<echo::EchoMarker>();
let instance_dir = pseudo_directory! {
"echo" => directory_entry,
};
let test = RoutingTestBuilder::new("a", components)
.add_outgoing_path(
"foo",
"/svc/my.service.Service/default".try_into().unwrap(),
instance_dir,
)
.build()
.await;
// Populate the collection with dynamic children.
test.create_dynamic_child(vec!["c:0"].into(), "coll", ChildDeclBuilder::new_lazy_child("foo"))
.await;
test.create_dynamic_child(vec!["c:0"].into(), "coll", ChildDeclBuilder::new_lazy_child("bar"))
.await;
test.create_dynamic_child(vec!["c:0"].into(), "coll", ChildDeclBuilder::new_lazy_child("baz"))
.await;
let target: AbsoluteMoniker = vec!["b:0"].into();
let namespace = test.bind_and_get_namespace(target.clone()).await;
let dir = capability_util::take_dir_from_namespace(&namespace, "/svc").await;
let service_dir = io_util::directory::open_directory(
&dir,
"my.service.Service",
io_util::OPEN_RIGHT_READABLE | io_util::OPEN_RIGHT_WRITABLE,
)
.await
.expect("failed to open service");
let entries: HashSet<String> = files_async::readdir(&service_dir)
.await
.expect("failed to read entries")
.into_iter()
.map(|d| d.name)
.collect();
assert_eq!(entries.len(), 2);
assert!(entries.contains("foo"));
assert!(entries.contains("bar"));
capability_util::add_dir_to_namespace(&namespace, "/svc", dir).await;
join!(
async move {
test.check_use(
target.clone(),
CheckUse::Service {
path: "/svc/my.service.Service".try_into().unwrap(),
instance: "foo/default".to_string(),
member: "echo".to_string(),
expected_res: ExpectedResult::Ok,
},
)
.await;
},
async move {
while let Some(echo::EchoRequest::EchoString { value, responder }) =
receiver.next().await
{
responder.send(value.as_ref().map(|v| v.as_str())).expect("failed to send reply")
}
}
);
}
| |
settings.go
|
package node
import (
"encoding/hex"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/host"
"github.com/celestiaorg/celestia-node/core"
"github.com/celestiaorg/celestia-node/node/fxutil"
"github.com/celestiaorg/celestia-node/node/p2p"
)
// Option for Node's Config.
type Option func(*Config, *settings) error
// WithP2PKey sets a custom Ed25519 private key for p2p networking.
func WithP2PKey(key crypto.PrivKey) Option {
return func(cfg *Config, sets *settings) (_ error) {
sets.P2PKey = key
return
}
}
// WithP2PKeyStr sets a custom hex-encoded Ed25519 private key for p2p networking.
func WithP2PKeyStr(key string) Option
|
// WithHost sets custom Host's data for p2p networking.
func WithHost(host host.Host) Option {
return func(cfg *Config, sets *settings) (_ error) {
sets.Host = host
return
}
}
// WithCoreClient sets a custom client for the core process.
func WithCoreClient(client core.Client) Option {
return func(cfg *Config, sets *settings) (_ error) {
sets.CoreClient = client
return
}
}
// settings stores all the non-Config values that can be altered for Node with Options.
type settings struct {
P2PKey crypto.PrivKey
Host p2p.HostBase
CoreClient core.Client
}
// overrides collects all the custom Modules and Components set to be overridden for the Node.
// TODO(@Bidon15): Pass settings instead of overrides func. Issue #300
func (sets *settings) overrides() fxutil.Option {
return fxutil.OverrideSupply(
&sets.P2PKey,
&sets.Host,
&sets.CoreClient,
)
}
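// A minimal sketch (not celestia-node code; applyOptions is a hypothetical
// helper) of how options of this shape are typically consumed: each Option
// mutates the Config/settings pair in turn and may fail early.
func applyOptions(cfg *Config, opts ...Option) (*settings, error) {
	sets := new(settings)
	for _, opt := range opts {
		if err := opt(cfg, sets); err != nil {
			return nil, err
		}
	}
	return sets, nil
}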
|
{
return func(cfg *Config, sets *settings) (_ error) {
decKey, err := hex.DecodeString(key)
if err != nil {
return err
}
key, err := crypto.UnmarshalEd25519PrivateKey(decKey)
if err != nil {
return err
}
sets.P2PKey = key
return
}
}
|
redis_transaction_test.go
|
package redis_test
import "testing"
func
|
(t *testing.T) {
r := NewTest(t)
r.RunTest(e.Multi).ExpectSuccess()
r.RunTest(e.Set, "a", "1").ExpectSuccess()
r.RunTest(e.Set, "b", "2").ExpectSuccess()
r.RunTest(e.Incr, "a").ExpectSuccess()
r.RunTest(e.Scan().ALL)
r.RunTest(e.HSet, "hash", "ha", "1").ExpectSuccess()
r.RunTest(e.HSet, "hash", "hb", "2").ExpectSuccess()
r.RunTest(e.HScan("hash").ALL)
r.RunTest(e.Exec).ExpectSuccess()
}
|
TestTransaction
|
predict_functions.py
|
# python imports
import numpy as np
from PIL import Image
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
from sys import exit
# File containing all of the functions used in the predict program
def load_checkpoint(filepath):
checkpoint = torch.load(filepath)
if checkpoint["arch"] == 'VGG':
model = models.vgg16(pretrained=True)
elif checkpoint["arch"] == 'Densenet':
model = models.densenet121(pretrained=True)
else:
print("Unsupported arch used in checkpoint")
exit(1)
for param in model.parameters():
param.requires_grad = False
model.class_to_idx = checkpoint['class_to_idx']
# Load classifier from checkpoint
classifier = checkpoint['classifier']
model.classifier = classifier
model.load_state_dict(checkpoint['model_state_dict'])
return model
def process_image(image_path):
|
def predict(image_path, model, topk, gpu):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
image = process_image(image_path)
if gpu:
model.to('cuda')
image = torch.from_numpy(image).type(torch.cuda.FloatTensor)
else:
model.to('cpu')
image = torch.from_numpy(image).type(torch.FloatTensor)
# Returns a new tensor with a dimension of size one inserted at the specified position.
image = image.unsqueeze(0)
output = model.forward(image)
probabilities = torch.exp(output)
# Probabilities and the indices of those probabilities corresponding to the classes
top_probabilities, top_indices = probabilities.topk(topk)
# Convert to lists
top_probabilities = top_probabilities.detach().type(torch.FloatTensor).numpy().tolist()[0]
top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0]
# Convert topk_indices to the actual class labels using class_to_idx
# Invert the dictionary so you get a mapping from index to class.
idx_to_class = {value: key for key, value in model.class_to_idx.items()}
#print(idx_to_class)
top_classes = [idx_to_class[index] for index in top_indices]
return top_probabilities, top_classes
|
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
# Process a PIL image for use in a PyTorch model
pil_image = Image.open(image_path)
# Resize
if pil_image.size[0] > pil_image.size[1]:
pil_image.thumbnail((5000, 256))
else:
pil_image.thumbnail((256, 5000))
# Crop
left_margin = (pil_image.width-224)/2
bottom_margin = (pil_image.height-224)/2
right_margin = left_margin + 224
top_margin = bottom_margin + 224
pil_image = pil_image.crop((left_margin, bottom_margin, right_margin, top_margin))
# Normalize
np_image = np.array(pil_image)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np_image - mean) / std
# PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array
# Color channel needs to be first; retain the order of the other two dimensions.
np_image = np_image.transpose((2, 0, 1))
return np_image
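# A minimal usage sketch (illustrative only): the checkpoint and image paths
# below are placeholders, not files shipped with this project.
if __name__ == "__main__":
    model = load_checkpoint("checkpoint.pth")
    probs, classes = predict("flowers/test/1/image.jpg", model, 5, False)
    for prob, cls in zip(probs, classes):
        print(f"{cls}: {prob:.3f}")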
|
zip.rs
|
use crate::fold::Fold;
use crate::*;
use std::fmt::Debug;
use std::sync::Arc;
/// When we zip types, we basically traverse the structure, ensuring
/// that it matches. When we come to types/lifetimes, we invoke the
/// callback methods in the zipper to match them up. Primarily used
/// during unification or similar operations.
///
/// So e.g. if you had `A: Eq<B>` zipped with `X: Eq<Y>`, then the zipper
/// would get two callbacks, one pairing `A` and `X`, and the other pairing
/// `B` and `Y`.
///
/// For things other than types/lifetimes, the zip impls will
/// guarantee equality. So e.g. if you have `A: Eq<B>` zipped with `X:
/// Ord<Y>`, you would wind up with an error, no matter what zipper
/// you are using. This is because the traits `Eq` and `Ord` are
/// represented by two distinct `ItemId` values, and the impl for
/// `ItemId` requires that all `ItemId` in the two zipped values match
/// up.
pub trait Zipper<'i, I: Interner> {
/// Indicates that the two types `a` and `b` were found in matching spots.
fn zip_tys(&mut self, a: &Ty<I>, b: &Ty<I>) -> Fallible<()>;
/// Indicates that the two lifetimes `a` and `b` were found in matching spots.
fn zip_lifetimes(&mut self, a: &Lifetime<I>, b: &Lifetime<I>) -> Fallible<()>;
/// Indicates that the two consts `a` and `b` were found in matching spots.
fn zip_consts(&mut self, a: &Const<I>, b: &Const<I>) -> Fallible<()>;
/// Zips two values appearing beneath binders.
fn zip_binders<T>(&mut self, a: &Binders<T>, b: &Binders<T>) -> Fallible<()>
where
T: HasInterner<Interner = I> + Zip<I> + Fold<I, I, Result = T>;
    /// Retrieves the interner from the underlying zipper object.
fn interner(&self) -> &'i I;
}
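// A minimal, self-contained sketch (NOT chalk code; `MiniTree` and `zip_mini`
// are hypothetical names) of the idea described above: walk two values in
// lockstep, hand matching leaves to a callback, and fail on any structural
// mismatch (the `Eq` vs `Ord` situation from the docs).
#[allow(dead_code)]
#[derive(Debug)]
enum MiniTree {
    Leaf(u32),
    Node(Box<MiniTree>, Box<MiniTree>),
}
#[allow(dead_code)]
fn zip_mini(a: &MiniTree, b: &MiniTree, on_leaves: &mut dyn FnMut(u32, u32)) -> Result<(), ()> {
    match (a, b) {
        (MiniTree::Leaf(x), MiniTree::Leaf(y)) => {
            on_leaves(*x, *y);
            Ok(())
        }
        (MiniTree::Node(al, ar), MiniTree::Node(bl, br)) => {
            zip_mini(al, bl, on_leaves)?;
            zip_mini(ar, br, on_leaves)
        }
        _ => Err(()), // shapes differ, so there is no way to match them up
    }
}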
impl<'f, 'i, Z, I> Zipper<'i, I> for &'f mut Z
where
I: Interner,
Z: Zipper<'i, I>,
{
fn zip_tys(&mut self, a: &Ty<I>, b: &Ty<I>) -> Fallible<()> {
(**self).zip_tys(a, b)
}
fn zip_lifetimes(&mut self, a: &Lifetime<I>, b: &Lifetime<I>) -> Fallible<()> {
(**self).zip_lifetimes(a, b)
}
fn zip_consts(&mut self, a: &Const<I>, b: &Const<I>) -> Fallible<()> {
(**self).zip_consts(a, b)
}
fn zip_binders<T>(&mut self, a: &Binders<T>, b: &Binders<T>) -> Fallible<()>
where
T: HasInterner<Interner = I> + Zip<I> + Fold<I, I, Result = T>,
{
(**self).zip_binders(a, b)
}
fn interner(&self) -> &'i I {
Z::interner(*self)
}
}
/// The `Zip` trait walks two values, invoking the `Zipper` methods where
/// appropriate, but otherwise requiring strict equality.
///
/// See `Zipper` trait for more details.
///
/// To implement the trait, typically you would use one of the macros
/// like `eq_zip!`, `struct_zip!`, or `enum_zip!`.
pub trait Zip<I>: Debug
where
I: Interner,
{
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i;
}
impl<'a, T: ?Sized + Zip<I>, I: Interner> Zip<I> for &'a T {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
<T as Zip<I>>::zip_with(zipper, a, b)
}
}
impl<I: Interner> Zip<I> for () {
fn
|
<'i, Z: Zipper<'i, I>>(_: &mut Z, _: &Self, _: &Self) -> Fallible<()> {
Ok(())
}
}
impl<T: Zip<I>, I: Interner> Zip<I> for Vec<T> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
<[T] as Zip<I>>::zip_with(zipper, a, b)
}
}
impl<T: Zip<I>, I: Interner> Zip<I> for [T] {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
if a.len() != b.len() {
return Err(NoSolution);
}
for (a_elem, b_elem) in a.iter().zip(b) {
Zip::zip_with(zipper, a_elem, b_elem)?;
}
Ok(())
}
}
impl<T: Zip<I>, I: Interner> Zip<I> for Arc<T> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
<T as Zip<I>>::zip_with(zipper, a, b)
}
}
impl<T: Zip<I>, I: Interner> Zip<I> for Box<T> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
<T as Zip<I>>::zip_with(zipper, a, b)
}
}
impl<T: Zip<I>, U: Zip<I>, I: Interner> Zip<I> for (T, U) {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
Zip::zip_with(zipper, &a.0, &b.0)?;
Zip::zip_with(zipper, &a.1, &b.1)?;
Ok(())
}
}
impl<I: Interner> Zip<I> for Ty<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
zipper.zip_tys(a, b)
}
}
impl<I: Interner> Zip<I> for Lifetime<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
zipper.zip_lifetimes(a, b)
}
}
impl<I: Interner> Zip<I> for Const<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
zipper.zip_consts(a, b)
}
}
impl<I: Interner, T: HasInterner<Interner = I> + Zip<I> + Fold<I, I, Result = T>> Zip<I>
for Binders<T>
{
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
zipper.zip_binders(a, b)
}
}
/// Generates a Zip impl that requires the two values be
/// equal. Suitable for atomic, scalar values.
macro_rules! eq_zip {
($I:ident => $t:ty) => {
impl<$I: Interner> Zip<$I> for $t {
fn zip_with<'i, Z: Zipper<'i, $I>>(_zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
if a != b {
return Err(NoSolution);
}
Ok(())
}
}
};
}
eq_zip!(I => AdtId<I>);
eq_zip!(I => TraitId<I>);
eq_zip!(I => AssocTypeId<I>);
eq_zip!(I => OpaqueTyId<I>);
eq_zip!(I => TypeName<I>);
eq_zip!(I => QuantifierKind);
eq_zip!(I => PhantomData<I>);
eq_zip!(I => PlaceholderIndex);
eq_zip!(I => ClausePriority);
impl<T: HasInterner<Interner = I> + Zip<I>, I: Interner> Zip<I> for InEnvironment<T> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
Zip::zip_with(zipper, &a.environment, &b.environment)?;
Zip::zip_with(zipper, &a.goal, &b.goal)?;
Ok(())
}
}
impl<I: Interner> Zip<I> for Environment<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
let interner = zipper.interner();
        assert_eq!(a.clauses.len(interner), b.clauses.len(interner)); // panics if the two environments have different numbers of clauses
Zip::zip_with(
zipper,
a.clauses.as_slice(interner),
b.clauses.as_slice(interner),
)?;
Ok(())
}
}
impl<I: Interner> Zip<I> for Goals<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
let interner = zipper.interner();
Zip::zip_with(zipper, a.as_slice(interner), b.as_slice(interner))?;
Ok(())
}
}
impl<I: Interner> Zip<I> for ProgramClauses<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
let interner = zipper.interner();
Zip::zip_with(zipper, a.as_slice(interner), b.as_slice(interner))?;
Ok(())
}
}
impl<I: Interner> Zip<I> for QuantifiedWhereClauses<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
let interner = zipper.interner();
Zip::zip_with(zipper, a.as_slice(interner), b.as_slice(interner))?;
Ok(())
}
}
impl<I: Interner> Zip<I> for Substitution<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
let interner = zipper.interner();
Zip::zip_with(zipper, a.parameters(interner), b.parameters(interner))
}
}
// Annoyingly, Goal cannot use `enum_zip` because some variants have
// two parameters, and I'm too lazy to make the macro account for the
// relevant name mangling.
impl<I: Interner> Zip<I> for Goal<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
let interner = zipper.interner();
Zip::zip_with(zipper, a.data(interner), b.data(interner))
}
}
// I'm too lazy to make `enum_zip` support type parameters.
impl<I: Interner> Zip<I> for VariableKind<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
match (a, b) {
(VariableKind::Ty, VariableKind::Ty) => Ok(()),
(VariableKind::Lifetime, VariableKind::Lifetime) => Ok(()),
(VariableKind::Const(ty_a), VariableKind::Const(ty_b)) => {
Zip::zip_with(zipper, ty_a, ty_b)
}
(VariableKind::Ty, _) | (VariableKind::Lifetime, _) | (VariableKind::Const(_), _) => {
panic!("zipping things of mixed kind")
}
}
}
}
impl<I: Interner> Zip<I> for GenericArg<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
let interner = zipper.interner();
Zip::zip_with(zipper, a.data(interner), b.data(interner))
}
}
impl<I: Interner> Zip<I> for ProgramClause<I> {
fn zip_with<'i, Z: Zipper<'i, I>>(zipper: &mut Z, a: &Self, b: &Self) -> Fallible<()>
where
I: 'i,
{
let interner = zipper.interner();
Zip::zip_with(zipper, a.data(interner), b.data(interner))
}
}
|
zip_with
|
factories.py
|
__all__ = ['FACTORY_VIEW', 'FACTORIES_VIEW', 'FACTORY_INIT']
FACTORIES_VIEW = """import datetime
from drf_core import factories
from {{ app_name }}.models import ({% for model in models %}
{{ model.object_name }},{% endfor %}
)
{% for model in models %}
# =============================================================================
# {{ model.object_name }}
# =============================================================================
class {{ model.object_name }}Factory(factories.ModelFactory):
# Factory data for {{ model.object_name }} model.
{% for field in model.fields %}{% if field.factory.code_line %}{% autoescape off %}{{ field.factory.code_line }}
{% endautoescape %}{% endif %}{% endfor %}
class Meta:
model = {{ model.object_name }}
{% endfor %}
apps = [{% for model in models %}
{{ model.object_name }}Factory,{% endfor %}
]
"""
FACTORY_VIEW = """{% for required_lib in model_meta.factory_required_libs %}{{ required_lib }}
{% endfor %}
{% for required_module in model_meta.factory_required_modules %}{{ required_module }}
{% endfor %}
from drf_core import factories
from {{ app_name }}.models.{{ model_meta.verbose_name_plural }} import {{ model_meta.object_name }}
# =============================================================================
# {{ model_meta.object_name }}
# =============================================================================
class {{ model_meta.object_name }}Factory(factories.ModelFactory):
# Factory data for {{ model_meta.object_name }} model.
{% for field in model_meta.fields %}{% if field.factory.code_line %}{% autoescape off %}{{ field.factory.code_line }}
{% endautoescape %}{% endif %}{% endfor %}
class Meta:
model = {{ model_meta.object_name }}
|
FACTORY_INIT = """{% for model in models %}from {{ app_name }}.factories.{{ model.verbose_name_plural }} import {{ model.object_name }}Factory
{% endfor %}
apps = [{% for model in models %}
{{ model.object_name }}Factory,{% endfor %}
]
"""
|
"""
|
multi.py
|
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Hashable,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, index as libindex, lib
from pandas._libs.hashtable import duplicated_int64
from pandas._typing import AnyArrayLike, ArrayLike, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_categorical_dtype,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import factorize_from_iterables
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
InvalidIndexError,
_index_shared_docs,
ensure_index,
)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.missing as missing
from pandas.core.sorting import (
get_group_index,
indexer_from_factorized,
lexsort_indexer,
)
from pandas.io.formats.printing import (
format_object_attrs,
format_object_summary,
pprint_thing,
)
if TYPE_CHECKING:
from pandas import Series # noqa:F401
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass="MultiIndex", target_klass="MultiIndex or list of tuples")
)
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):
"""
This class manages a MultiIndex by mapping label combinations to positive
integers.
"""
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 into one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
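# A worked example (illustrative only, not part of pandas; _demo_codes_to_int
# is a hypothetical name) of the shift-and-OR composition described above.
def _demo_codes_to_int(codes, offsets=(3, 0)):
    # With offsets [3, 0], level 0's code occupies the bits above the 3 bits
    # reserved for level 1, so summing/OR-ing never mixes the two levels.
    out = 0
    for code, offset in zip(codes, offsets):
        out |= code << offset
    return out
# _demo_codes_to_int([1, 2]) == 0b001010 == 10, and the lexicographic order of
# code tuples is preserved: _demo_codes_to_int([0, 3]) == 3 < 8 == _demo_codes_to_int([1, 0]).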
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):
"""
This class manages those (extreme) cases in which the number of possible
label combinations overflows the 64 bits integers, and uses an ObjectEngine
containing Python integers.
"""
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 into one Python integer (each), in a
strictly monotonic way (i.e. respecting the lexicographic order of
integer combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits. Since this can overflow uint64, first make sure we are
# working with Python integers:
codes = codes.astype("object") << self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer (per row):
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
.. versionadded:: 0.24.0
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
is_lexsorted
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
get_locs
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`_
for more.
Examples
--------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
"""
_deprecations = Index._deprecations | frozenset()
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
rename = Index.set_names
_tuples = None
sortorder: Optional[int]
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity: bool = True,
_set_identity: bool = True,
):
# compat with Index
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must be the same.")
if len(levels) == 0:
raise ValueError("Must pass non-zero number of levels/codes")
result = object.__new__(MultiIndex)
# we've already validated levels and codes, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
new_codes = result._verify_integrity()
result._codes = new_codes
if _set_identity:
result._reset_identity()
return result
def _validate_codes(self, level: List, code: List):
"""
Reassign code values as -1 if their corresponding levels are NaN.
Parameters
----------
code : list
Code to reassign.
level : list
Level to check for missing values (NaN, NaT, None).
Returns
-------
new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
"""
null_mask = isna(level)
if np.any(null_mask):
code = np.where(null_mask[code], -1, code)
return code
def _verify_integrity(
self, codes: Optional[List] = None, levels: Optional[List] = None
):
"""
Parameters
----------
codes : optional list
Codes to check for validity. Defaults to current codes.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
If length of levels and codes don't match, if the codes for any
level would exceed level bounds, or there are any duplicate levels.
Returns
-------
new codes where code value = -1 if it corresponds to a
NaN level.
"""
# NOTE: Currently does not check, among other things, that the cached
# nlevels matches, nor that sortorder actually matches the data's sort order.
codes = codes or self.codes
levels = levels or self.levels
if len(levels) != len(codes):
raise ValueError(
"Length of levels and codes must match. NOTE: "
"this index is in an inconsistent state."
)
codes_length = len(codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
if len(level_codes) != codes_length:
raise ValueError(
f"Unequal code lengths: {[len(code_) for code_ in codes]}"
)
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError(
f"On level {i}, code max ({level_codes.max()}) >= length of "
f"level ({len(level)}). NOTE: this index is in an "
"inconsistent state"
)
if len(level_codes) and level_codes.min() < -1:
raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1")
if not level.is_unique:
raise ValueError(
f"Level values must be unique: {list(level)} on level {i}"
)
if self.sortorder is not None:
if self.sortorder > self._lexsort_depth():
raise ValueError(
"Value for sortorder must be inferior or equal to actual "
f"lexsort_depth: sortorder {self.sortorder} "
f"with lexsort_depth {self._lexsort_depth()}"
)
codes = [
self._validate_codes(level, code) for level, code in zip(levels, codes)
]
new_codes = FrozenList(codes)
return new_codes
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=lib.no_default):
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
elif is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError("all arrays must be same length")
codes, levels = factorize_from_iterables(arrays)
if names is lib.no_default:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
verify_integrity=False,
)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
if not is_list_like(tuples):
raise TypeError("Input must be a list / sequence of tuple-likes.")
elif is_iterator(tuples):
tuples = list(tuples)
if len(tuples) == 0:
if names is None:
raise TypeError("Cannot infer number of levels from empty list")
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples._values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = zip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=lib.no_default):
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
.. versionchanged:: 1.0.0
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> pd.MultiIndex.from_product([numbers, colors],
... names=['number', 'color'])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
codes, levels = factorize_from_iterables(iterables)
if names is lib.no_default:
names = [getattr(it, "name", None) for it in iterables]
codes = cartesian_product(codes)
return MultiIndex(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df, sortorder=None, names=None):
"""
Make a MultiIndex from a DataFrame.
.. versionadded:: 0.24.0
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = zip(*df.items())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
@property
def _values(self):
# We override here, since our parent uses _data, which we don't use.
return self.values
@property
def values(self):
if self._tuples is not None:
return self._tuples
values = []
for i in range(self.nlevels):
vals = self._get_level_values(i)
if is_categorical_dtype(vals):
vals = vals._internal_get_values()
if isinstance(vals.dtype, ExtensionDtype) or hasattr(vals, "_box_values"):
vals = vals.astype(object)
vals = np.array(vals, copy=False)
values.append(vals)
self._tuples = lib.fast_zip(values)
return self._tuples
@property
def array(self):
"""
Raises a ValueError for `MultiIndex` because there's no single
array backing a MultiIndex.
Raises
------
ValueError
"""
raise ValueError(
"MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
# overriding the base Index.shape definition to avoid materializing
# the values (GH-27384, GH-27775)
return (len(self),)
def __len__(self) -> int:
return len(self.codes[0])
# --------------------------------------------------------------------
# Levels Methods
@cache_readonly
def levels(self):
# Use cache_readonly to ensure that self.get_locs doesn't repeatedly
# create new IndexEngine
# https://github.com/pandas-dev/pandas/issues/31648
result = [
x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
]
for level in result:
# disallow midx.levels[0].name = "foo"
level._no_setting_name = True
return FrozenList(result)
def _set_levels(
self, levels, level=None, copy=False, validate=True, verify_integrity=False
):
# This is NOT part of the levels property because it should not be
# possible to set levels from outside this class. User beware if you
# change _levels directly
if validate:
if len(levels) == 0:
raise ValueError("Must set non-zero number of levels.")
if level is None and len(levels) != self.nlevels:
raise ValueError("Length of levels must match number of levels.")
if level is not None and len(levels) != len(level):
raise ValueError("Length of levels must match length of level.")
if level is None:
new_levels = FrozenList(
ensure_index(lev, copy=copy)._shallow_copy() for lev in levels
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_levels = list(self._levels)
for lev_num, lev in zip(level_numbers, levels):
new_levels[lev_num] = ensure_index(lev, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
if verify_integrity:
new_codes = self._verify_integrity(levels=new_levels)
self._codes = new_codes
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning new index.
Parameters
----------
levels : sequence or list of sequence
New level(s) to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
(2, 'one'), (2, 'two'),
(3, 'one'), (3, 'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2]])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b', 'c'], level=0)
MultiIndex([('a', 'one'),
('a', 'two'),
('b', 'one'),
('b', 'two'),
('c', 'one'),
('c', 'two')],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b'], level='bar')
MultiIndex([(1, 'a'),
(1, 'b'),
(2, 'a'),
(2, 'b'),
(3, 'a'),
(3, 'b')],
names=['foo', 'bar'])
If any of the levels passed to ``set_levels()`` exceeds the
existing length, all of the values from that argument will
be stored in the MultiIndex levels, though the values will
be truncated in the MultiIndex output.
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2)],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
"""
if is_list_like(levels) and not isinstance(levels, Index):
levels = list(levels)
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(
levels, level=level, validate=True, verify_integrity=verify_integrity
)
if not inplace:
return idx
@property
def nlevels(self) -> int:
"""
Integer number of levels in this MultiIndex.
"""
return len(self._levels)
@property
def levshape(self):
"""
A tuple with the length of each level.
"""
return tuple(len(x) for x in self.levels)
# --------------------------------------------------------------------
# Codes Methods
@property
def codes(self):
return self._codes
def _set_codes(
self, codes, level=None, copy=False, validate=True, verify_integrity=False
):
if validate:
if level is None and len(codes) != self.nlevels:
raise ValueError("Length of codes must match number of levels")
if level is not None and len(codes) != len(level):
raise ValueError("Length of codes must match length of levels.")
if level is None:
new_codes = FrozenList(
_coerce_indexer_frozen(level_codes, lev, copy=copy).view()
for lev, level_codes in zip(self._levels, codes)
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_codes = list(self._codes)
for lev_num, level_codes in zip(level_numbers, codes):
lev = self.levels[lev_num]
new_codes[lev_num] = _coerce_indexer_frozen(level_codes, lev, copy=copy)
new_codes = FrozenList(new_codes)
if verify_integrity:
new_codes = self._verify_integrity(codes=new_codes)
self._codes = new_codes
self._tuples = None
self._reset_cache()
def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
"""
Set new codes on MultiIndex. Defaults to returning
new index.
.. versionadded:: 0.24.0
New name for deprecated method `set_labels`.
Parameters
----------
codes : sequence or list of sequence
New codes to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
verify_integrity : bool (default True)
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = pd.MultiIndex.from_tuples([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([1, 0, 1, 0], level=0)
MultiIndex([(2, 'one'),
(1, 'two'),
(2, 'one'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([0, 0, 1, 1], level='bar')
MultiIndex([(1, 'one'),
(1, 'one'),
(2, 'two'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(codes):
raise TypeError("Codes must be list-like")
if is_list_like(codes[0]):
raise TypeError("Codes must be list-like")
level = [level]
codes = [codes]
elif level is None or is_list_like(level):
if not is_list_like(codes) or not is_list_like(codes[0]):
raise TypeError("Codes must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# --------------------------------------------------------------------
# Index Internals
@cache_readonly
def _engine(self):
# Calculate the number of bits needed to represent labels in each
# level, as log2 of their sizes (including -1 for NaN):
sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels]))
# Sum bit counts, starting from the _right_....
lev_bits = np.cumsum(sizes[::-1])[::-1]
# ... in order to obtain offsets such that sorting the combination of
# shifted codes (one for each level, resulting in a unique integer) is
# equivalent to sorting lexicographically the codes themselves. Notice
# that each level needs to be shifted by the number of bits needed to
# represent the _previous_ ones:
offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
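# A small worked example (illustrative only, not part of pandas): for levels
# of sizes 3 and 4, sizes = ceil(log2([4, 5])) = [2., 3.],
# lev_bits = [5., 3.], and offsets = [3, 0], i.e. level 0 is shifted past
# the 3 bits reserved for level 1; lev_bits[0] = 5 <= 64 keeps the uint64
# engine chosen below.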
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
return MultiIndexUIntEngine(self.levels, self.codes, offsets)
@property
def _constructor(self):
return MultiIndex.from_tuples
@Appender(Index._shallow_copy.__doc__)
def _shallow_copy(self, values=None, **kwargs):
if values is not None:
names = kwargs.pop("names", kwargs.pop("name", self.names))
# discards freq
kwargs.pop("freq", None)
return MultiIndex.from_tuples(values, names=names, **kwargs)
return self.copy(**kwargs)
def _shallow_copy_with_infer(self, values, **kwargs):
# On equal MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
if len(values) == 0:
return MultiIndex(
levels=[[] for _ in range(self.nlevels)],
codes=[[] for _ in range(self.nlevels)],
**kwargs,
)
return self._shallow_copy(values, **kwargs)
# --------------------------------------------------------------------
def copy(
self,
names=None,
dtype=None,
levels=None,
codes=None,
deep=False,
name=None,
_set_identity=False,
):
"""
Make a copy of this object. Names, dtype, levels and codes can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
levels : sequence, optional
codes : sequence, optional
deep : bool, default False
name : Label
Kept for compatibility with 1-dimensional Index. Should not be used.
Returns
-------
MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
names = self._validate_names(name=name, names=names, deep=deep)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if codes is None:
codes = deepcopy(self.codes)
else:
if levels is None:
levels = self.levels
if codes is None:
codes = self.codes
return MultiIndex(
levels=levels,
codes=codes,
names=names,
sortorder=self.sortorder,
verify_integrity=False,
_set_identity=_set_identity,
)
def __array__(self, dtype=None) -> np.ndarray:
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError, ValueError):
return False
@cache_readonly
def dtype(self) -> np.dtype:
return np.dtype("O")
def _is_memory_usage_qualified(self) -> bool:
|
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep: bool = False) -> int:
# we are overwriting our base class to avoid
# computing .values here which could materialize
# a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self) -> int:
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep: bool = False) -> int:
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
*this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
def _formatter_func(self, tup):
"""
Formats each item in tup according to its level's formatter function.
"""
formatter_funcs = [level._formatter_func for level in self.levels]
return tuple(func(val) for func, val in zip(formatter_funcs, tup))
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
return format_object_summary(
self, self._formatter_func, name=name, line_break_each_value=True
)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
return format_object_attrs(self, include_dtype=False)
def _format_native_types(self, na_rep="nan", **kwargs):
new_levels = []
new_codes = []
# go through the levels and format them
for level, level_codes in zip(self.levels, self.codes):
level = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = level_codes == -1
if mask.any():
nan_index = len(level)
level = np.append(level, na_rep)
assert not level_codes.flags.writeable # i.e. copy is needed
level_codes = level_codes.copy() # make writeable
level_codes[mask] = nan_index
new_levels.append(level)
new_codes.append(level_codes)
if len(new_levels) == 1:
# a single-level multi-index
return Index(new_levels[0].take(new_codes[0]))._format_native_types()
else:
# reconstruct the multi-index
mi = MultiIndex(
levels=new_levels,
codes=new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
return mi.values
def format(
self,
space=2,
sparsify=None,
adjoin=True,
names=False,
na_rep=None,
formatter=None,
):
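# Render the MultiIndex as one list of formatted strings per level:
# take each level through its codes, substitute na_rep for missing
# entries, optionally prepend the level names, and sparsify repeated
# leading values for display.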
if len(self) == 0:
return []
stringified_levels = []
for lev, level_codes in zip(self.levels, self.codes):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(level_codes).format(formatter=formatter)
# we have some NA
mask = level_codes == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [
pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n"))
for x in algos.take_1d(lev._values, level_codes)
]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(
pprint_thing(name, escape_chars=("\t", "\r", "\n"))
if name is not None
else ""
)
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ""
# GH3547
# use value of sparsify as sentinel, unless it's an obvious
# "Truthy" value
if sparsify not in [True, 1]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(
result_levels, start=int(names), sentinel=sentinel
)
if adjoin:
from pandas.io.formats.format import _get_adjustment
adj = _get_adjustment()
return adj.adjoin(space, *result_levels).split("\n")
else:
return result_levels
# --------------------------------------------------------------------
# Names Methods
def _get_names(self):
return FrozenList(self._names)
def _set_names(self, names, level=None, validate=True):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : boolean, default True
validate that the names match level lengths
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError("Names should be list-like for a MultiIndex")
names = list(names)
if validate:
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(lev) for lev in level]
# set the name
for lev, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
self._names[lev] = name
# If .levels has been accessed, the names in our cache will be stale.
self._reset_cache()
names = property(
fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n"""
)
# --------------------------------------------------------------------
@Appender(Index._get_grouper_for_level.__doc__)
def _get_grouper_for_level(self, mapper, level):
indexer = self.codes[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
codes, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_codes, uniques = algos.factorize(indexer[mask], sort=True)
codes = np.empty(len(indexer), dtype=indexer.dtype)
codes[mask] = ok_codes
codes[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
else:
# break references back to us so that setting the name
# on the output of a groupby doesn't reflect back here.
level_index = level_index.copy()
if level_index._can_hold_na:
grouper = level_index.take(codes, fill_value=True)
else:
grouper = level_index.take(codes)
return grouper, codes, level_index
@cache_readonly
def inferred_type(self) -> str:
return "mixed"
def _get_level_number(self, level) -> int:
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError(
f"The name {level} occurs multiple times, use a level number"
)
try:
level = self.names.index(level)
except ValueError as err:
if not is_integer(level):
raise KeyError(f"Level {level} not found") from err
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"{orig_level} is not a valid level number"
) from err
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"not {level + 1}"
) from err
return level
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
if all(x.is_monotonic for x in self.levels):
# If each level is sorted, we can operate on the codes directly. GH27495
return libalgos.is_lexsorted(
[x.astype("int64", copy=False) for x in self.codes]
)
# reversed() because lexsort() wants the most significant key last.
values = [
self._get_level_values(i).values for i in reversed(range(len(self.levels)))
]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self.values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def _inferred_type_levels(self):
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep="first"):
shape = map(len, self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
return duplicated_int64(ids, keep)
def fillna(self, value=None, downcast=None):
"""
fillna is not implemented for MultiIndex
"""
raise NotImplementedError("isna is not defined for MultiIndex")
@Appender(Index.dropna.__doc__)
def dropna(self, how="any"):
nans = [level_codes == -1 for level_codes in self.codes]
if how == "any":
indexer = np.any(nans, axis=0)
elif how == "all":
indexer = np.all(nans, axis=0)
else:
raise ValueError(f"invalid how option: {how}")
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.copy(codes=new_codes, deep=True)
def _get_level_values(self, level, unique=False):
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int level
unique : bool, default False
if True, drop duplicated values
Returns
-------
values : ndarray
"""
lev = self.levels[level]
level_codes = self.codes[level]
name = self._names[level]
if unique:
level_codes = algos.unique(level_codes)
filled = algos.take_1d(lev._values, level_codes, fill_value=lev._na_value)
return lev._shallow_copy(filled, name=name)
def get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
--------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@Appender(Index.unique.__doc__)
def unique(self, level=None):
if level is None:
return super().unique()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.set_levels([i._to_safe_for_reshape() for i in self.levels])
def to_frame(self, index=True, name=None):
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
.. versionadded:: 0.24.0
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of str, optional
The passed names should substitute index level names.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
See Also
--------
DataFrame
"""
from pandas import DataFrame
if name is not None:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence of column names.")
if len(name) != len(self.levels):
raise ValueError(
"'name' should have same length as number of levels on index."
)
idx_names = name
else:
idx_names = self.names
# Guarantee resulting column order - PY36+ dict maintains insertion order
result = DataFrame(
{
(level if lvlname is None else lvlname): self._get_level_values(level)
for lvlname, level in zip(idx_names, range(len(self.levels)))
},
copy=False,
)
if index:
result.index = self
return result
def to_flat_index(self):
"""
Convert a MultiIndex to an Index of Tuples containing the level values.
.. versionadded:: 0.24.0
Returns
-------
pd.Index
Index with the MultiIndex data represented in Tuples.
Notes
-----
This method will simply return the caller if called by anything other
than a MultiIndex.
Examples
--------
>>> index = pd.MultiIndex.from_product(
... [['foo', 'bar'], ['baz', 'qux']],
... names=['a', 'b'])
>>> index.to_flat_index()
Index([('foo', 'baz'), ('foo', 'qux'),
('bar', 'baz'), ('bar', 'qux')],
dtype='object')
"""
return Index(self.values, tupleize_cols=False)
@property
def is_all_dates(self) -> bool:
return False
def is_lexsorted(self) -> bool:
"""
Return True if the codes are lexicographically sorted.
Returns
-------
bool
"""
return self.lexsort_depth == self.nlevels
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
return self.sortorder
return self._lexsort_depth()
def _lexsort_depth(self) -> int:
"""
Compute and return the lexsort_depth, the number of levels of the
MultiIndex that are sorted lexically
Returns
-------
int
"""
int64_codes = [ensure_int64(level_codes) for level_codes in self.codes]
for k in range(self.nlevels, 0, -1):
if libalgos.is_lexsorted(int64_codes[:k]):
return k
return 0
def _sort_levels_monotonic(self):
"""
This is an *internal* function.
Create a new MultiIndex from the current one, with the items in each
level sorted monotonically. This does not actually make the entire
MultiIndex monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.sort_values()
MultiIndex([('a', 'aa'),
('a', 'bb'),
('b', 'aa'),
('b', 'bb')],
)
"""
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_codes = []
for lev, level_codes in zip(self.levels, self.codes):
if not lev.is_monotonic:
try:
# indexer to reorder the levels
indexer = lev.argsort()
except TypeError:
pass
else:
lev = lev.take(indexer)
# indexer to reorder the level codes
indexer = ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
level_codes = algos.take_1d(ri, level_codes)
new_levels.append(lev)
new_codes.append(level_codes)
return MultiIndex(
new_levels,
new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def remove_unused_levels(self):
"""
Create a new MultiIndex from the current that removes
unused levels, meaning that they are not expressed in the labels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_product([range(2), list('ab')])
>>> mi
MultiIndex([(0, 'a'),
(0, 'b'),
(1, 'a'),
(1, 'b')],
)
>>> mi[2:]
MultiIndex([(1, 'a'),
(1, 'b')],
)
The 0 from the first level is not represented
and can be removed
>>> mi2 = mi[2:].remove_unused_levels()
>>> mi2.levels
FrozenList([[1], ['a', 'b']])
"""
new_levels = []
new_codes = []
changed = False
for lev, level_codes in zip(self.levels, self.codes):
# Since few levels are typically unused, bincount() is more
# efficient than unique() - however it only accepts positive values
# (and drops order):
uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
# We have unused levels
changed = True
# Recalculate uniques, now preserving order.
# Can easily be cythonized by exploiting the already existing
# "uniques" and stop parsing "level_codes" when all items
# are found:
uniques = algos.unique(level_codes)
if has_na:
na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# codes get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
code_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
code_mapping[uniques] = np.arange(len(uniques)) - has_na
level_codes = code_mapping[level_codes]
# new levels are simple
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_codes.append(level_codes)
result = self.view()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_codes(new_codes, validate=False)
return result
# --------------------------------------------------------------------
# Pickling Methods
def __reduce__(self):
"""Necessary for making this object picklable"""
d = dict(
levels=list(self.levels),
codes=list(self.codes),
sortorder=self.sortorder,
names=list(self.names),
)
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
def __getitem__(self, key):
if is_scalar(key):
key = com.cast_scalar_indexer(key)
retval = []
for lev, level_codes in zip(self.levels, self.codes):
if level_codes[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[level_codes[key]])
return tuple(retval)
else:
if com.is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
if isinstance(key, Index):
key = np.asarray(key)
new_codes = [level_codes[key] for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
taken = self._assert_take_fillable(
self.codes,
indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1,
)
return MultiIndex(
levels=self.levels, codes=taken, names=self.names, verify_integrity=False
)
def _assert_take_fillable(
self, values, indices, allow_fill=True, fill_value=None, na_value=None
):
""" Internal method to handle NA filling of take """
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
raise ValueError(msg)
taken = [lab.take(indices) for lab in self.codes]
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label
label_values[mask] = na_value
masked.append(np.asarray(label_values))
taken = masked
else:
taken = [lab.take(indices) for lab in self.codes]
return taken
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all(
(isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except (TypeError, IndexError):
return Index(new_tuples)
def argsort(self, *args, **kwargs) -> np.ndarray:
return self.values.argsort(*args, **kwargs)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
repeats = ensure_platform_int(repeats)
return MultiIndex(
levels=self.levels,
codes=[
level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)
for level_codes in self.codes
],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def where(self, cond, other=None):
raise NotImplementedError(".where is not supported for MultiIndex operations")
def drop(self, codes, level=None, errors="raise"):
"""
Make new MultiIndex with passed list of codes deleted
Parameters
----------
codes : array-like
Must be a list of tuples
level : int or level name, default None
errors : str, default 'raise'
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=object)
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
inds.extend(range(loc.start, loc.stop))
elif com.is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=3,
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
def _drop_from_level(self, codes, level, errors="raise"):
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
mask = ~algos.isin(self.codes[i], values)
if mask.all() and errors != "ignore":
raise KeyError(f"labels {codes} not found in level")
return self[mask]
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis.
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.swaplevel(0, 1)
MultiIndex([('bb', 'a'),
('aa', 'a'),
('bb', 'b'),
('aa', 'b')],
)
"""
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
Returns
-------
MultiIndex
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(
f"Length of order must be same as number of levels ({self.nlevels}), "
f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def _get_codes_for_sorting(self):
"""
we categorize our codes by using all of the
available categories (not just the observed ones),
excluding any missing ones (-1); this is in preparation
for sorting, where we need to disambiguate that -1 is not
a valid value
"""
def cats(level_codes):
return np.arange(
np.array(level_codes).max() + 1 if len(level_codes) else 0,
dtype=level_codes.dtype,
)
return [
Categorical.from_codes(level_codes, cats(level_codes), ordered=True)
for level_codes in self.codes
]
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level.
If list-like must be names or ints of levels.
ascending : bool, default True
False to sort in descending order.
Can also be a list to specify a directed ordering.
sort_remaining : bool, default True
Sort by the remaining levels after sorting by ``level``.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray
Indices of output values in original index.
"""
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
indexer = lexsort_indexer(
[self.codes[lev] for lev in level], orders=ascending
)
# level ordering
else:
codes = list(self.codes)
shape = list(self.levshape)
# partition codes and shape
primary = tuple(codes[lev] for lev in level)
primshp = tuple(shape[lev] for lev in level)
# Reverse sorted to retain the order of
# smaller indices that needs to be removed
for lev in sorted(level, reverse=True):
codes.pop(lev)
shape.pop(lev)
if sort_remaining:
primary += primary + tuple(codes)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp, compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(
codes=new_codes,
levels=self.levels,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
return new_index, indexer
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
Indices of output values in original index.
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, "names")
if level is not None:
if method is not None:
raise TypeError("Fill method not supported if level passed")
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase._ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop("freq", None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
else:
target = ensure_index(target)
target, indexer, _ = self._join_level(
target, level, how="right", return_indexers=True, keep_order=False
)
else:
target = ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
else:
raise ValueError("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (
preserve_names
and target.nlevels == self.nlevels
and target.names != self.names
):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
# --------------------------------------------------------------------
# Indexing Methods
def get_value(self, series, key):
# Label-based
if not is_hashable(key) or is_iterator(key):
# We allow tuples if they are hashable, whereas other Index
# subclasses require scalar.
# We have to explicitly exclude generators, as these are hashable.
raise InvalidIndexError(key)
try:
loc = self.get_loc(key)
except KeyError:
if is_integer(key):
loc = key
else:
raise
return self._get_values_for_loc(series, loc, key)
def _get_values_for_loc(self, series: "Series", loc, key):
"""
Do a positional lookup on the given Series, returning either a scalar
or a Series.
Assumes that `series.index is self`
"""
new_values = series._values[loc]
if is_scalar(loc):
return new_values
new_index = self[loc]
new_index = maybe_droplevels(new_index, key)
new_ser = series._constructor(new_values, index=new_index, name=series.name)
return new_ser.__finalize__(series)
def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
indexer, keyarr = super()._convert_listlike_indexer(keyarr)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError(f"{keyarr[mask]} not in index")
return indexer, keyarr
def _get_partial_string_timestamp_match_key(self, key):
"""
Translate any partial string timestamp matches in key, returning the
new key.
Only relevant for MultiIndex.
"""
# GH#10331
if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = tuple([key] + [slice(None)] * (len(self.levels) - 1))
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if (
isinstance(component, str)
and self.levels[i]._supports_partial_string_indexing
):
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ensure_index(target)
# empty indexer
if is_list_like(target) and not len(target):
return ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self.values).get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
if not self.is_unique:
raise ValueError("Reindexing only valid with uniquely valued Index objects")
if method == "pad" or method == "backfill":
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
)
indexer = self._engine.get_indexer(target, method, limit)
elif method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
else:
indexer = self._engine.get_indexer(target)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
return super().get_indexer_non_unique(target)
def get_slice_bound(
self, label: Union[Hashable, Sequence[Hashable]], side: str, kind: str
) -> int:
"""
For an ordered MultiIndex, compute slice bound
that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object or tuple of objects
side : {'left', 'right'}
kind : {'loc', 'getitem'}
Returns
-------
int
Index of label.
Notes
-----
This method only works if level 0 index of the MultiIndex is lexsorted.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
>>> mi.get_slice_bound('b', side="left", kind="loc")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
>>> mi.get_slice_bound(('b','f'), side="right", kind="loc")
3
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if not isinstance(label, tuple):
label = (label,)
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step, kind=kind)
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
f"({self.lexsort_depth})"
)
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.codes)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev and not isna(lab):
if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):
raise TypeError(f"Level type mismatch: {lab}")
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
if k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
else:
return start + section.searchsorted(idx, side=side)
def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
"""
If key is an NA value, the location in the index is unified as -1.
Parameters
----------
level_index: Index
key : label
Returns
-------
loc : int
If key is NA value, loc is -1
Else, location of key in index.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
"""
if is_scalar(key) and isna(key):
return -1
else:
return level_index.get_loc(key)
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels as an integer, slice or
boolean mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Notes
-----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
"""
if method is not None:
raise NotImplementedError(
"only the default get_loc method is "
"currently supported for MultiIndex"
)
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != "int64":
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype="bool")
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, (tuple, list)):
# not including list here breaks some indexing, xref #30892
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError(
f"Key length ({keylen}) exceeds index depth ({self.nlevels})"
)
if keylen == self.nlevels and self.is_unique:
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (
self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))
)
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn(
"indexing past lexsort depth may impact performance.",
PerformanceWarning,
stacklevel=10,
)
loc = np.arange(start, stop, dtype="int64")
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self._get_loc_single_level_index(
self.levels[i], k
)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
def get_loc_level(self, key, level=0, drop_level: bool = True):
"""
Get both the location for the requested label(s) and the
resulting sliced index.
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
If ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False], dtype=bool),
Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
"""
# different name to distinguish from maybe_droplevels
def maybe_mi_droplevels(indexer, levels, drop_level: bool):
if not drop_level:
return self[indexer]
# kludge around
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except ValueError:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError(
"Key for location must have same length as number of levels"
)
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_mi_droplevels(result, level, drop_level)
level = self._get_level_number(level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_mi_droplevels(indexer, [0], drop_level)
return indexer, new_index
except (TypeError, InvalidIndexError):
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [
i for i in range(len(key)) if key[i] != slice(None, None)
]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
if len(key) == self.nlevels and self.is_unique:
# Complete key in unique index -> standard get_loc
try:
return (self._engine.get_loc(key), None)
except KeyError as e:
raise KeyError(key) from e
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_mi_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0, indexer=None):
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
level_codes = self.codes[level]
def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
# given the inputs and the codes/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(codes):
# we have an indexer which maps the locations in the labels
# that we have already selected (and is not an indexer for the
# entire set); otherwise this would be wasteful, so we only need to
# examine locations that are in this set. The only magic here is
# that the result is the mapping back to the set that we have
# selected
from pandas import Series
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)._ndarray_values
else:
m = np.zeros(len(codes), dtype=bool)
m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True
return m
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(
key.start, key.stop, key.step, kind="loc"
)
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
start = getattr(start, "start", start)
stop = getattr(stop, "stop", stop)
return convert_indexer(start, stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
# need right-searching semantics here, like when we are
# using a slice, so include stop + 1
# (so that stop itself is included)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = level_codes.searchsorted(start, side="left")
j = level_codes.searchsorted(stop, side="right")
return slice(i, j, step)
else:
code = self._get_loc_single_level_index(level_index, key)
if level > 0 or self.lexsort_depth == 0:
# Desired level is not sorted
locs = np.array(level_codes == code, dtype=bool, copy=False)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
i = level_codes.searchsorted(code, side="left")
j = level_codes.searchsorted(code, side="right")
if i == j:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return slice(i, j)
def get_locs(self, seq):
"""
Get location for a sequence of labels.
Parameters
----------
seq : label, slice, list, mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
numpy.ndarray
NumPy array of integers suitable for passing to iloc.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_locs('b') # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP
array([2], dtype=int64)
"""
from pandas.core.indexes.numeric import Int64Index
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError(
"MultiIndex slicing requires the index to be lexsorted: slicing "
f"on levels {true_slices}, lexsort depth {self.lexsort_depth}"
)
# indexer
# this is the list of all values that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r):
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif com.is_bool_indexer(r):
if len(r) != n:
raise ValueError(
"cannot index with a boolean indexer "
"that is not the same length as the "
"index"
)
r = r.nonzero()[0]
return Int64Index(r)
def _update_indexer(idxr, indexer=indexer):
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
return indexer & idxr
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(_convert_to_indexer(k), indexer=indexer)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i, indexer=indexer)
)
indexers = idxrs if indexers is None else indexers | idxrs
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer)
else:
# no matches we are done
return Int64Index([])._ndarray_values
elif com.is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(
_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)
),
indexer=indexer,
)
else:
# a single label
indexer = _update_indexer(
_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]
),
indexer=indexer,
)
# empty indexer
if indexer is None:
return Int64Index([])._ndarray_values
indexer = self._reorder_indexer(seq, indexer)
return indexer._ndarray_values
def _reorder_indexer(
self, seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...], indexer: ArrayLike
) -> ArrayLike:
"""
Reorder an indexer of a MultiIndex (self) so that the labels are in the
same order as given in seq
Parameters
----------
seq : label/slice/list/mask or a sequence of such
indexer: an Int64Index indexer of self
Returns
-------
indexer : a sorted Int64Index indexer of self ordered as seq
"""
# If the index is lexsorted and the list-like labels in seq are sorted
# then we do not need to sort
if self.is_lexsorted():
need_sort = False
for i, k in enumerate(seq):
if is_list_like(k):
if not need_sort:
k_codes = self.levels[i].get_indexer(k)
k_codes = k_codes[k_codes >= 0] # Filter absent keys
# True if the given codes are not ordered
need_sort = (k_codes[:-1] > k_codes[1:]).any()
# Bail out if both index and seq are sorted
if not need_sort:
return indexer
n = len(self)
keys: Tuple[np.ndarray, ...] = tuple()
# For each level of the sequence in seq, map the level codes to the
# order in which they appear in the list-like sequence.
# This mapping is then used to reorder the indexer
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
new_order = np.arange(n)[indexer]
elif is_list_like(k):
# Generate a map with all level codes as sorted initially
key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
self.levels[i]
)
# Set order as given in the indexer list
level_indexer = self.levels[i].get_indexer(k)
level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys
key_order_map[level_indexer] = np.arange(len(level_indexer))
new_order = key_order_map[self.codes[i][indexer]]
else:
# For all other case, use the same order as the level
new_order = np.arange(n)[indexer]
keys = (new_order,) + keys
# Find the reordering using lexsort on the keys mapping
ind = np.lexsort(keys)
return indexer[ind]
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError("after < before")
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_codes = [level_codes[left:right] for level_codes in self.codes]
new_codes[0] = new_codes[0] - i
return MultiIndex(levels=new_levels, codes=new_codes, verify_integrity=False)
def equals(self, other) -> bool:
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
# d-level MultiIndex can equal d-tuple Index
if not is_object_dtype(other.dtype):
if self.nlevels != other.nlevels:
return False
other_vals = com.values_from_object(ensure_index(other))
return array_equivalent(self._ndarray_values, other_vals)
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
self_codes = self_codes[self_codes != -1]
self_values = algos.take_nd(
np.asarray(self.levels[i]._values), self_codes, allow_fill=False
)
other_codes = other.codes[i]
other_codes = other_codes[other_codes != -1]
other_values = algos.take_nd(
np.asarray(other.levels[i]._values), other_codes, allow_fill=False
)
# since we use NaT for both datetime64 and timedelta64,
# we can have a situation where a level is typed, say,
# timedelta64 in self (IOW it has values other than NaT)
# but typed datetime64 in other (where it is all NaT);
# these are still equivalent
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
return True
def equal_levels(self, other) -> bool:
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
# --------------------------------------------------------------------
# Set Methods
def union(self, other, sort=None):
"""
Form the union of two MultiIndex objects
Parameters
----------
other : MultiIndex or array / Index of tuples
sort : False or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `self` has length 0.
3. Some values in `self` or `other` cannot be compared.
A RuntimeWarning is issued in this case.
* False : do not sort the result.
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
Index
>>> index.union(index2)
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
# TODO: Index.union returns other when `len(self)` is 0.
uniq_tuples = lib.fast_unique_multiple(
[self._ndarray_values, other._ndarray_values], sort=sort
)
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def intersection(self, other, sort=False):
"""
Form the intersection of two MultiIndex objects.
Parameters
----------
other : MultiIndex or array / Index of tuples
sort : False or None, default False
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default from ``True`` to ``False``, to match
behaviour from before 0.24.0
Returns
-------
Index
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
return self
lvals = self._ndarray_values
rvals = other._ndarray_values
uniq_tuples = None  # flag whether _inner_indexer was successful
if self.is_monotonic and other.is_monotonic:
try:
uniq_tuples = self._inner_indexer(lvals, rvals)[0]
sort = False # uniq_tuples is already sorted
except TypeError:
pass
if uniq_tuples is None:
other_uniq = set(rvals)
seen = set()
uniq_tuples = [
x for x in lvals if x in other_uniq and not (x in seen or seen.add(x))
]
if sort is None:
uniq_tuples = sorted(uniq_tuples)
if len(uniq_tuples) == 0:
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def difference(self, other, sort=None):
"""
Compute set difference of two MultiIndex objects
Parameters
----------
other : MultiIndex
sort : False or None, default None
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
diff : MultiIndex
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self
if self.equals(other):
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
difference = this.values.take(label_diff)
if sort is None:
difference = sorted(difference)
if len(difference) == 0:
return MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
def _convert_can_do_setop(self, other):
result_names = self.names
if not hasattr(other, "names"):
if len(other) == 0:
other = MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
verify_integrity=False,
)
else:
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other)
except TypeError as err:
raise TypeError(msg) from err
else:
result_names = self.names if self.names == other.names else None
return other, result_names
# --------------------------------------------------------------------
@Appender(Index.astype.__doc__)
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_categorical_dtype(dtype):
msg = "> 1 ndim Categorical are not supported at this time"
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
raise TypeError(
f"Setting {type(self)} dtype to anything other "
"than object is not supported"
)
elif copy is True:
return self._shallow_copy()
return self
def insert(self, loc: int, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(item, tuple):
item = (item,) + ("",) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError("Item must have length equal to number of levels.")
new_levels = []
new_codes = []
for k, level, level_codes in zip(item, self.levels, self.codes):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other codes
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))
return MultiIndex(
levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
)
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
verify_integrity=False,
)
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
values = MultiIndex.from_tuples(values, names=self.names).values
return algos.isin(self.values, values)
else:
num = self._get_level_number(level)
levs = self.get_level_values(num)
if levs.size == 0:
return np.zeros(len(levs), dtype=np.bool_)
return levs.isin(values)
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_numeric_methods_add_sub_disabled()
MultiIndex._add_logical_methods_disabled()
def _sparsify(label_list, start: int = 0, sentinel=""):
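    """
    Blank out repeated prefix values in consecutive label tuples.

    ``label_list`` holds one sequence of labels per level; the sequences are
    zipped into row tuples and, for every row after ``start``, leading entries
    equal to the previous row are replaced with ``sentinel`` (the last entry is
    always kept). The result is returned in the same per-level layout.
    Typically used when rendering a MultiIndex so repeated outer-level values
    are not printed on every row.
    """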
pivoted = list(zip(*label_list))
k = len(label_list)
result = pivoted[: start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1 :]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return list(zip(*result))
def _get_na_rep(dtype) -> str:
return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN")
def maybe_droplevels(index, key):
"""
Attempt to drop level or levels from the given index.
Parameters
----------
index: Index
key : scalar or tuple
Returns
-------
Index
"""
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index.droplevel(0)
except ValueError:
# we have dropped too much, so back out
return original_index
else:
try:
index = index.droplevel(0)
except ValueError:
pass
return index
def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
"""
Coerce the array_like indexer to the smallest integer dtype that can encode all
of the given categories.
Parameters
----------
array_like : array-like
categories : array-like
copy : bool
Returns
-------
np.ndarray
Non-writeable.
"""
array_like = coerce_indexer_dtype(array_like, categories)
if copy:
array_like = array_like.copy()
array_like.flags.writeable = False
return array_like
|
""" return a boolean if we need a qualified .info display """
def f(l):
return "mixed" in l or "string" in l or "unicode" in l
return any(f(l) for l in self._inferred_type_levels)
|
issues.rs
|
//#![feature(trace_macros)]
#![allow(dead_code)]
#![cfg_attr(feature = "cargo-clippy", allow(redundant_closure))]
use crates_unittest::test_case;
use std::prelude::v1::*;
use nom::{
character::{is_digit, streaming::space1 as space},
error::ErrorKind,
number::streaming::le_u64,
Err, IResult, Needed,
};
#[allow(dead_code)]
struct Range {
start: char,
end: char,
}
pub fn take_char(input: &[u8]) -> IResult<&[u8], char> {
if !input.is_empty() {
Ok((&input[1..], input[0] as char))
} else {
Err(Err::Incomplete(Needed::Size(1)))
}
}
//trace_macros!(true);
#[allow(dead_code)]
named!(range<&[u8], Range>,
alt!(
do_parse!(
start: take_char >>
tag!("-") >>
end: take_char >>
(Range {
start: start,
end: end,
})
) |
map!(
take_char,
|c| {
Range {
start: c,
end: c,
}
}
)
)
);
#[allow(dead_code)]
|
|cs| {
cs
}
)
);
#[test_case]
fn issue_58() {
let _ = range(&b"abcd"[..]);
let _ = literal(&b"abcd"[..]);
}
//trace_macros!(false);
mod parse_int {
use nom::HexDisplay;
use nom::{
character::streaming::{digit1 as digit, space1 as space},
IResult,
};
use std::str;
use crates_unittest::test_case;
use std::prelude::v1::*;
named!(parse_ints<Vec<i32>>, many0!(spaces_or_int));
fn spaces_or_int(input: &[u8]) -> IResult<&[u8], i32> {
println!("{}", input.to_hex(8));
do_parse!(
input,
opt!(complete!(space))
>> res: map!(complete!(digit), |x| {
println!("x: {:?}", x);
let result = str::from_utf8(x).unwrap();
println!("Result: {}", result);
println!("int is empty?: {}", x.is_empty());
match result.parse() {
Ok(i) => i,
Err(e) => panic!("UH OH! NOT A DIGIT! {:?}", e),
}
})
>> (res)
)
}
#[test_case]
fn issue_142() {
let subject = parse_ints(&b"12 34 5689a"[..]);
let expected = Ok((&b"a"[..], vec![12, 34, 5689]));
assert_eq!(subject, expected);
let subject = parse_ints(&b"12 34 5689 "[..]);
let expected = Ok((&b" "[..], vec![12, 34, 5689]));
assert_eq!(subject, expected)
}
}
#[test_case]
fn usize_length_bytes_issue() {
use nom::number::streaming::be_u16;
let _: IResult<&[u8], &[u8], (&[u8], ErrorKind)> = length_data!(b"012346", be_u16);
}
/*
DOES NOT COMPILE
#[test_case]
fn issue_152() {
named!(take4, take!(4));
named!(xyz, tag!("XYZ"));
named!(abc, tag!("abc"));
named!(sw,
switch!(take4,
b"abcd" => xyz |
b"efgh" => abc
)
);
}
*/
#[test_case]
fn take_till_issue() {
named!(nothing, take_till!(call!(|_| true)));
assert_eq!(nothing(b""), Err(Err::Incomplete(Needed::Size(1))));
assert_eq!(nothing(b"abc"), Ok((&b"abc"[..], &b""[..])));
}
// named!(
// issue_498<Vec<&[u8]>>,
// separated_nonempty_list!(opt!(space), tag!("abcd"))
// );
named!(issue_308(&str) -> bool,
do_parse! (
tag! ("foo") >>
b: alt! (
complete!(map! (tag! ("1"), |_: &str|->bool {true})) |
value! (false)
) >>
(b) ));
fn issue_302(input: &[u8]) -> IResult<&[u8], Option<Vec<u64>>> {
do_parse!(input, entries: cond!(true, count!(le_u64, 3)) >> (entries))
}
#[test_case]
fn issue_655() {
use nom::character::streaming::{line_ending, not_line_ending};
named!(twolines(&str) -> (&str, &str),
do_parse!(
l1 : not_line_ending >>
line_ending >>
l2 : not_line_ending >>
line_ending >>
((l1, l2))
)
);
assert_eq!(twolines("foo\nbar\n"), Ok(("", ("foo", "bar"))));
assert_eq!(twolines("féo\nbar\n"), Ok(("", ("féo", "bar"))));
assert_eq!(twolines("foé\nbar\n"), Ok(("", ("foé", "bar"))));
assert_eq!(twolines("foé\r\nbar\n"), Ok(("", ("foé", "bar"))));
}
#[test_case]
fn issue_721() {
named!(f1<&str, u16>, parse_to!(u16));
named!(f2<&str, String>, parse_to!(String));
assert_eq!(f1("1234"), Ok(("", 1234)));
assert_eq!(f2("foo"), Ok(("", "foo".to_string())));
//assert_eq!(parse_to!("1234", u16), Ok(("", 1234)));
//assert_eq!(parse_to!("foo", String), Ok(("", "foo".to_string())));
}
named!(issue_717<&[u8], Vec<&[u8]> >,
separated_list0!(tag!([0x0]), is_not!([0x0u8]))
);
struct NoPartialEq {
value: i32,
}
named!(issue_724<&str, i32>,
do_parse!(
metadata: permutation!(
map!(tag!("hello"), |_| NoPartialEq { value: 1 }),
map!(tag!("world"), |_| NoPartialEq { value: 2 })
) >>
(metadata.0.value + metadata.1.value)
)
);
#[test_case]
fn issue_752() {
assert_eq!(
Err::Error(("ab", nom::error::ErrorKind::ParseTo)),
parse_to!("ab", usize).unwrap_err()
)
}
fn atom_specials(c: u8) -> bool {
c == b'q'
}
named!(
capability<&str>,
do_parse!(tag!(" ") >> _atom: map_res!(take_till1!(atom_specials), std::str::from_utf8) >> ("a"))
);
#[test_case]
fn issue_759() {
assert_eq!(capability(b" abcqd"), Ok((&b"qd"[..], "a")));
}
named_args!(issue_771(count: usize)<Vec<u32>>,
length_count!(value!(count), call!(nom::number::streaming::be_u32))
);
/// This test is in a separate module to check that all required symbols are imported in
/// `escaped_transform!()`. Without the module, the `use`-es of the current module would
/// mask the error ('"Use of undeclared type or module `Needed`" in escaped_transform!').
mod issue_780 {
use std::prelude::v1::*;
named!(issue_780<&str, String>,
escaped_transform!(call!(::nom::character::streaming::alpha1), '\\', tag!("n"))
);
}
// issue 617
named!(digits, take_while1!(is_digit));
named!(multi_617<&[u8], () >, fold_many0!( digits, (), |_, _| {}));
// Sad :(
named!(multi_617_fails<&[u8], () >, fold_many0!( take_while1!( is_digit ), (), |_, _| {}));
mod issue_647 {
use crates_unittest::test_case;
use std::prelude::v1::*;
use nom::{error::ErrorKind, number::streaming::be_f64, Err};
pub type Input<'a> = &'a [u8];
#[derive(PartialEq, Debug, Clone)]
struct Data {
c: f64,
v: Vec<f64>,
}
fn list<'a, 'b>(
input: Input<'a>,
_cs: &'b f64,
) -> Result<(Input<'a>, Vec<f64>), Err<(&'a [u8], ErrorKind)>> {
separated_list0!(input, complete!(tag!(",")), complete!(be_f64))
}
named!(data<Input,Data>, map!(
do_parse!(
c: be_f64 >>
tag!("\n") >>
v: call!(list,&c) >>
(c,v)
), |(c,v)| {
Data {
c: c,
v: v
}
}
));
}
named!(issue_775, take_till1!(|_| true));
#[test_case]
fn issue_848_overflow_incomplete_bits_to_bytes() {
named!(take, take!(0x2000000000000000));
named!(parser<&[u8], &[u8]>, bits!(bytes!(take)));
assert_eq!(
parser(&b""[..]),
Err(Err::Failure(error_position!(&b""[..], ErrorKind::TooLarge)))
);
}
#[test_case]
fn issue_942() {
use nom::error::ParseError;
pub fn parser<'a, E: ParseError<&'a str>>(i: &'a str) -> IResult<&'a str, usize, E> {
use nom::{character::complete::char, error::context, multi::many0_count};
many0_count(context("char_a", char('a')))(i)
}
assert_eq!(parser::<()>("aaa"), Ok(("", 3)));
}
#[test_case]
fn issue_many_m_n_with_zeros() {
use nom::character::complete::char;
use nom::multi::many_m_n;
let parser = many_m_n::<_, _, (), _>(0, 0, char('a'));
assert_eq!(parser("aaa"), Ok(("aaa", vec!())));
}
#[test_case]
fn issue_1027_convert_error_panic_nonempty() {
use nom::character::complete::char;
use nom::error::{convert_error, VerboseError};
use nom::sequence::pair;
let input = "a";
let result: IResult<_, _, VerboseError<&str>> = pair(char('a'), char('b'))(input);
let err = match result.unwrap_err() {
Err::Error(e) => e,
_ => unreachable!(),
};
let msg = convert_error(&input, err);
assert_eq!(
msg,
"0: at line 1:\na\n ^\nexpected \'b\', got end of input\n\n"
);
}
|
named!(literal<&[u8], Vec<char> >,
map!(
many1!(take_char),
|
replacer.go
|
package replacer
import (
"context"
"strconv"
"strings"
"time"
"github.com/coredns/coredns/plugin/metadata"
"github.com/coredns/coredns/plugin/pkg/dnstest"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
)
// Replacer replaces labels for values in strings.
type Replacer struct {
valueFunc func(request.Request, *dnstest.Recorder, string) string
labels []string
}
// labels are all supported labels that can be used in the default Replacer.
var labels = []string{
"{type}",
"{name}",
"{class}",
"{proto}",
"{size}",
"{remote}",
"{port}",
"{local}",
// Header values.
headerReplacer + "id}",
headerReplacer + "opcode}",
headerReplacer + "do}",
headerReplacer + "bufsize}",
// Recorded replacements.
"{rcode}",
"{rsize}",
"{duration}",
headerReplacer + "rrflags}",
}
// value returns the current value of label.
func value(state request.Request, rr *dnstest.Recorder, label string) string {
switch label {
case "{type}":
return state.Type()
case "{name}":
return state.Name()
case "{class}":
return state.Class()
case "{proto}":
return state.Proto()
case "{size}":
return strconv.Itoa(state.Req.Len())
case "{remote}":
return addrToRFC3986(state.IP())
case "{port}":
return state.Port()
case "{local}":
return addrToRFC3986(state.LocalIP())
// Header placeholders (case-insensitive).
case headerReplacer + "id}":
return strconv.Itoa(int(state.Req.Id))
case headerReplacer + "opcode}":
return strconv.Itoa(state.Req.Opcode)
case headerReplacer + "do}":
return boolToString(state.Do())
case headerReplacer + "bufsize}":
return strconv.Itoa(state.Size())
// Recorded replacements.
case "{rcode}":
if rr == nil {
return EmptyValue
}
rcode := dns.RcodeToString[rr.Rcode]
if rcode == "" {
rcode = strconv.Itoa(rr.Rcode)
}
return rcode
case "{rsize}":
if rr == nil {
return EmptyValue
}
return strconv.Itoa(rr.Len)
case "{duration}":
if rr == nil {
return EmptyValue
}
return strconv.FormatFloat(time.Since(rr.Start).Seconds(), 'f', -1, 64) + "s"
case headerReplacer + "rrflags}":
if rr != nil && rr.Msg != nil {
return flagsToString(rr.Msg.MsgHdr)
}
return EmptyValue
}
return EmptyValue
}
// New makes a new Replacer. This only needs to be called once during setup; Replace can then be called for each incoming message.
// A replacer is safe for concurrent use.
func New() Replacer {
return Replacer{
valueFunc: value,
labels: labels,
}
}
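// Illustrative usage (a sketch; ctx, state and rec stand for a request context,
// a request.Request and a *dnstest.Recorder obtained elsewhere):
//
//	r := New()
//	line := r.Replace(ctx, state, rec, "{remote}:{port} asked {name} {type}, got {rcode} in {duration}")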
// Replace performs a replacement of values on s and returns the string with the replaced values.
func (r Replacer) Replace(ctx context.Context, state request.Request, rr *dnstest.Recorder, s string) string {
for _, placeholder := range r.labels {
if strings.Contains(s, placeholder) {
s = strings.Replace(s, placeholder, r.valueFunc(state, rr, placeholder), -1)
}
}
	// Metadata label replacements. Scan for {/ and search for the next }, replacing that metadata label with
	// any metadata that is available.
b := strings.Builder{}
for strings.Contains(s, labelReplacer) {
idxStart := strings.Index(s, labelReplacer)
endOffset := idxStart + len(labelReplacer)
idxEnd := strings.Index(s[endOffset:], "}")
if idxEnd > -1 {
label := s[idxStart+2 : endOffset+idxEnd]
fm := metadata.ValueFunc(ctx, label)
replacement := EmptyValue
if fm != nil {
replacement = fm()
}
b.WriteString(s[:idxStart])
b.WriteString(replacement)
s = s[endOffset+idxEnd+1:]
} else {
break
}
}
b.WriteString(s)
return b.String()
}
func boolToString(b bool) string {
if b {
return "true"
}
return "false"
}
// flagsToString checks all header flags and returns those
// that are set, joined as a comma-separated string.
func flagsToString(h dns.MsgHdr) string {
	flags := make([]string, 8) // one slot for each header flag checked below
i := 0
if h.Response {
flags[i] = "qr"
i++
}
if h.Authoritative {
flags[i] = "aa"
i++
}
if h.Truncated {
flags[i] = "tc"
i++
}
if h.RecursionDesired {
flags[i] = "rd"
i++
}
if h.RecursionAvailable {
flags[i] = "ra"
i++
}
if h.Zero {
flags[i] = "z"
i++
}
if h.AuthenticatedData {
flags[i] = "ad"
i++
}
if h.CheckingDisabled {
flags[i] = "cd"
i++
}
return strings.Join(flags[:i], ",")
}
// addrToRFC3986 will add brackets to the address if it is an IPv6 address.
func addrToRFC3986(addr string) string
|
const (
headerReplacer = "{>"
labelReplacer = "{/"
// EmptyValue is the default empty value.
EmptyValue = "-"
)
|
{
if strings.Contains(addr, ":") {
return "[" + addr + "]"
}
return addr
}
|
parse_freecells_invalid.rs
|
use freecell::parse_freecells;
#[test]
fn
|
() {
assert_eq!(
parse_freecells("KD XX 8H"),
Err("Could not parse freecells: \"KD XX 8H\"".to_string())
);
}
#[test]
fn test_comma_separated() {
assert_eq!(
parse_freecells("JH, TD, 9H"),
Err("Could not parse freecells: \"JH, TD, 9H\"".to_string())
);
}
#[test]
fn test_too_many_cards() {
assert_eq!(
parse_freecells("JH TD 9H 6C 5S"),
Err("Could not parse freecells: \"JH TD 9H 6C 5S\"".to_string())
);
}
|
test_invalid_card
|
route.go
|
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package route
import (
"fmt"
"sort"
"strconv"
"strings"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
xdsfault "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3"
xdshttpfault "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
xdstype "github.com/envoyproxy/go-control-plane/envoy/type/v3"
wellknown "github.com/envoyproxy/go-control-plane/pkg/wellknown"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/wrappers"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/route/retry"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/util/gogo"
"istio.io/pkg/log"
)
// Headers with special meaning in Envoy
const (
HeaderMethod = ":method"
HeaderAuthority = ":authority"
HeaderScheme = ":scheme"
)
// DefaultRouteName is the name assigned to a route generated by default in absence of a virtual service.
const DefaultRouteName = "default"
// maxRegExProgramSize defines the max regex complexity supported. 1024 is a safe default and should work
// for most cases. We should look to make it configurable if this is not sufficient.
// Note that this is different from the length of the regex.
// Refer to https://github.com/google/re2/blob/a98fad02c421896bc75d97f49ccd245cdce7dd55/re2/re2.h#L287 for details.
const maxRegExProgramSize = 1024
var (
regexEngine = &matcher.RegexMatcher_GoogleRe2{GoogleRe2: &matcher.RegexMatcher_GoogleRE2{}}
regexEngineWithMaxProgramSize = &matcher.RegexMatcher_GoogleRe2{GoogleRe2: &matcher.RegexMatcher_GoogleRE2{
MaxProgramSize: &wrappers.UInt32Value{
Value: uint32(maxRegExProgramSize),
},
}}
)
// VirtualHostWrapper is a context-dependent virtual host entry with guarded routes.
// Note: Currently we are not fully utilizing this structure. We could invoke this logic
// once for all sidecars in the cluster to compute all RDS for inside the mesh and arrange
// it by listener port. However to properly use such an optimization, we need to have an
// eventing subsystem to invalidate the computed routes if any service changes/virtual services change.
type VirtualHostWrapper struct {
// Port is the listener port for outbound sidecar (e.g. service port)
Port int
// Services are the services from the registry. Each service
// in this list should have a virtual host entry
Services []*model.Service
// VirtualServiceHosts is a list of hosts defined in the virtual service
	// if a virtual service hostname is the same as the service registry host, then
// the host would appear in Services as we need to generate all variants of the
// service's hostname within a platform (e.g., foo, foo.default, foo.default.svc, etc.)
VirtualServiceHosts []string
// Routes in the virtual host
Routes []*route.Route
}
// BuildSidecarVirtualHostsFromConfigAndRegistry creates virtual hosts from
// the given set of virtual services and a list of services from the
// service registry. Services are indexed by FQDN hostnames.
// The list of services is also passed to allow maintaining consistent ordering.
func BuildSidecarVirtualHostsFromConfigAndRegistry(node *model.Proxy, push *model.PushContext, serviceRegistry map[host.Name]*model.Service,
virtualServices []config.Config, listenPort int) []VirtualHostWrapper {
out := make([]VirtualHostWrapper, 0)
// translate all virtual service configs into virtual hosts
for _, virtualService := range virtualServices {
wrappers := buildSidecarVirtualHostsForVirtualService(node, push, virtualService, serviceRegistry, listenPort)
if len(wrappers) == 0 {
// If none of the routes matched by source (i.e. proxyLabels), then discard this entire virtual service
continue
}
out = append(out, wrappers...)
}
// compute services missing virtual service configs
missing := make(map[host.Name]struct{})
for fqdn := range serviceRegistry {
missing[fqdn] = struct{}{}
}
for _, wrapper := range out {
for _, service := range wrapper.Services {
delete(missing, service.Hostname)
}
}
// append default hosts for the service missing virtual services
for hn := range missing {
svc := serviceRegistry[hn]
for _, port := range svc.Ports {
if port.Protocol.IsHTTP() || util.IsProtocolSniffingEnabledForPort(port) {
cluster := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", svc.Hostname, port.Port)
traceOperation := traceOperation(string(svc.Hostname), port.Port)
httpRoute := BuildDefaultHTTPOutboundRoute(node, cluster, traceOperation)
// if this host has no virtualservice, the consistentHash on its destinationRule will be useless
if hashPolicy := getHashPolicyByService(node, push, svc, port); hashPolicy != nil {
httpRoute.GetRoute().HashPolicy = []*route.RouteAction_HashPolicy{hashPolicy}
}
out = append(out, VirtualHostWrapper{
Port: port.Port,
Services: []*model.Service{svc},
Routes: []*route.Route{httpRoute},
})
}
}
}
return out
}
// separateVSHostsAndServices splits the virtual service hosts into services (if they are found in the registry) and
// plain non-registry hostnames
func separateVSHostsAndServices(virtualService config.Config,
serviceRegistry map[host.Name]*model.Service) ([]string, []*model.Service) {
rule := virtualService.Spec.(*networking.VirtualService)
hosts := make([]string, 0)
servicesInVirtualService := make([]*model.Service, 0)
wchosts := make([]host.Name, 0)
// As a performance optimization, process non wildcard hosts first, so that they can be
// looked up directly in the service registry map.
for _, hostname := range rule.Hosts {
vshost := host.Name(hostname)
if !vshost.IsWildCarded() {
if svc, exists := serviceRegistry[vshost]; exists {
servicesInVirtualService = append(servicesInVirtualService, svc)
} else {
hosts = append(hosts, hostname)
}
} else {
// Add it to the wildcard hosts so that they can be processed later.
wchosts = append(wchosts, vshost)
}
}
// Now process wild card hosts as they need to follow the slow path of looping through all services in the registry.
for _, hostname := range wchosts {
// Say host is *.global
foundSvcMatch := false
// Say we have services *.foo.global, *.bar.global
for svcHost, svc := range serviceRegistry {
// *.foo.global matches *.global
if svcHost.Matches(hostname) {
servicesInVirtualService = append(servicesInVirtualService, svc)
foundSvcMatch = true
}
}
if !foundSvcMatch {
hosts = append(hosts, string(hostname))
}
}
return hosts, servicesInVirtualService
}
// buildSidecarVirtualHostsForVirtualService creates virtual hosts corresponding to a virtual service.
// Called for each port to determine the list of vhosts on the given port.
// It may return an empty list if no VirtualService rule has a matching service.
func buildSidecarVirtualHostsForVirtualService(
node *model.Proxy,
push *model.PushContext,
virtualService config.Config,
serviceRegistry map[host.Name]*model.Service,
listenPort int) []VirtualHostWrapper {
hosts, servicesInVirtualService := separateVSHostsAndServices(virtualService, serviceRegistry)
// Now group these services by port so that we can infer the destination.port if the user
// doesn't specify any port for a multiport service. We need to know the destination port in
// order to build the cluster name (outbound|<port>|<subset>|<serviceFQDN>)
// If the destination service is being accessed on port X, we set that as the default
// destination port
serviceByPort := make(map[int][]*model.Service)
for _, svc := range servicesInVirtualService {
for _, port := range svc.Ports {
if port.Protocol.IsHTTP() || util.IsProtocolSniffingEnabledForPort(port) {
serviceByPort[port.Port] = append(serviceByPort[port.Port], svc)
}
}
}
// We need to group the virtual hosts by port, because each http connection manager is
// going to send a separate RDS request
// Note that we need to build non-default HTTP routes only for the virtual services.
// The services in the serviceRegistry will always have a default route (/)
if len(serviceByPort) == 0 {
		// This is a gross HACK. Fix me. It's a much bigger surgery though, due to the way
// the current code is written.
serviceByPort[80] = nil
}
meshGateway := map[string]bool{constants.IstioMeshGateway: true}
out := make([]VirtualHostWrapper, 0, len(serviceByPort))
routes, err := BuildHTTPRoutesForVirtualService(node, push, virtualService, serviceRegistry, listenPort, meshGateway)
if err != nil || len(routes) == 0 {
return out
}
for port, portServices := range serviceByPort {
out = append(out, VirtualHostWrapper{
Port: port,
Services: portServices,
VirtualServiceHosts: hosts,
Routes: routes,
})
}
return out
}
// GetDestinationCluster generates a cluster name for the given route destination, using the
// destination's port when set and otherwise falling back to the service's single port or the
// listener port. Called when translating routes to determine the destination cluster.
func GetDestinationCluster(destination *networking.Destination, service *model.Service, listenerPort int) string {
port := listenerPort
if destination.GetPort() != nil {
port = int(destination.GetPort().GetNumber())
} else if service != nil && len(service.Ports) == 1 {
// if service only has one port defined, use that as the port, otherwise use default listenerPort
port = service.Ports[0].Port
// Do not return blackhole cluster for service==nil case as there is a legitimate use case for
// calling this function with nil service: to route to a pre-defined statically configured cluster
// declared as part of the bootstrap.
// If blackhole cluster is needed, do the check on the caller side. See gateway and tls.go for examples.
}
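	// The resulting name has the form outbound|<port>|<subset>|<host>,
	// e.g. "outbound|8080|v1|reviews.default.svc.cluster.local" (illustrative values).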
return model.BuildSubsetKey(model.TrafficDirectionOutbound, destination.Subset, host.Name(destination.Host), port)
}
// BuildHTTPRoutesForVirtualService creates data plane HTTP routes from the virtual service spec.
// The rule should be adapted to destination names (outbound clusters).
// Each rule is guarded by source labels.
//
// This is called for each port to compute virtual hosts.
// Each VirtualService is tried, with a list of services that listen on the port.
// Error indicates the given virtualService can't be used on the port.
// This function is used by both the gateway and the sidecar
func BuildHTTPRoutesForVirtualService(
node *model.Proxy,
push *model.PushContext,
virtualService config.Config,
serviceRegistry map[host.Name]*model.Service,
listenPort int,
gatewayNames map[string]bool) ([]*route.Route, error) {
vs, ok := virtualService.Spec.(*networking.VirtualService)
if !ok { // should never happen
		return nil, fmt.Errorf("not a virtual service: %#v", virtualService)
}
out := make([]*route.Route, 0, len(vs.Http))
allroutes:
for _, http := range vs.Http {
if len(http.Match) == 0 {
if r := translateRoute(push, node, http, nil, listenPort, virtualService, serviceRegistry, gatewayNames); r != nil {
out = append(out, r)
}
// We have a rule with catch all match. Other rules are of no use.
break
} else {
for _, match := range http.Match {
if r := translateRoute(push, node, http, match, listenPort, virtualService, serviceRegistry, gatewayNames); r != nil {
out = append(out, r)
// This is a catch all path. Routes are matched in order, so we will never go beyond this match
					// As an optimization, we can just stop sending any more routes here.
if isCatchAllMatch(match) {
break allroutes
}
}
}
}
}
if len(out) == 0 {
return nil, fmt.Errorf("no routes matched")
}
return out, nil
}
// sourceMatchHTTP checks if the sourceLabels or the gateways in a match condition match with the
// labels for the proxy or the gateway name for which we are generating a route
func sourceMatchHTTP(match *networking.HTTPMatchRequest, proxyLabels labels.Collection, gatewayNames map[string]bool, proxyNamespace string) bool {
if match == nil {
return true
}
// Trim by source labels or mesh gateway
if len(match.Gateways) > 0 {
for _, g := range match.Gateways {
if gatewayNames[g] {
return true
}
}
} else if proxyLabels.IsSupersetOf(match.GetSourceLabels()) {
return match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace
}
return false
}
// translateRoute translates a single HTTPRoute (with an optional match condition) into an
// Envoy route, returning nil when the route does not apply to this proxy, gateway or port.
func translateRoute(push *model.PushContext, node *model.Proxy, in *networking.HTTPRoute,
match *networking.HTTPMatchRequest, port int,
virtualService config.Config,
serviceRegistry map[host.Name]*model.Service,
gatewayNames map[string]bool) *route.Route {
	// When building routes, it's okay if the target cluster cannot be
	// resolved. Traffic to such clusters will blackhole.
// Match by source labels/gateway names inside the match condition
if !sourceMatchHTTP(match, labels.Collection{node.Metadata.Labels}, gatewayNames, node.Metadata.Namespace) {
return nil
}
// Match by the destination port specified in the match condition
if match != nil && match.Port != 0 && match.Port != uint32(port) {
return nil
}
out := &route.Route{
Match: translateRouteMatch(match, node),
Metadata: util.BuildConfigInfoMetadata(virtualService.Meta),
}
routeName := in.Name
if match != nil && match.Name != "" {
routeName = routeName + "." + match.Name
}
// add a name to the route
out.Name = routeName
operations := translateHeadersOperations(in.Headers)
out.RequestHeadersToAdd = operations.requestHeadersToAdd
out.ResponseHeadersToAdd = operations.responseHeadersToAdd
out.RequestHeadersToRemove = operations.requestHeadersToRemove
out.ResponseHeadersToRemove = operations.responseHeadersToRemove
out.TypedPerFilterConfig = make(map[string]*any.Any)
if redirect := in.Redirect; redirect != nil {
action := &route.Route_Redirect{
Redirect: &route.RedirectAction{
HostRedirect: redirect.Authority,
PathRewriteSpecifier: &route.RedirectAction_PathRedirect{
PathRedirect: redirect.Uri,
},
}}
switch in.Redirect.RedirectCode {
case 0, 301:
action.Redirect.ResponseCode = route.RedirectAction_MOVED_PERMANENTLY
case 302:
action.Redirect.ResponseCode = route.RedirectAction_FOUND
case 303:
action.Redirect.ResponseCode = route.RedirectAction_SEE_OTHER
case 307:
action.Redirect.ResponseCode = route.RedirectAction_TEMPORARY_REDIRECT
case 308:
action.Redirect.ResponseCode = route.RedirectAction_PERMANENT_REDIRECT
default:
log.Warnf("Redirect Code %d is not yet supported", in.Redirect.RedirectCode)
action = nil
}
out.Action = action
} else {
action := &route.RouteAction{
Cors: translateCORSPolicy(in.CorsPolicy, node),
RetryPolicy: retry.ConvertPolicy(in.Retries),
}
// Configure timeouts specified by Virtual Service if they are provided, otherwise set it to defaults.
var d *duration.Duration
if in.Timeout != nil {
d = gogo.DurationToProtoDuration(in.Timeout)
} else {
d = features.DefaultRequestTimeout
}
action.Timeout = d
if util.IsIstioVersionGE18(node) {
if maxDuration := action.MaxStreamDuration; maxDuration != nil {
maxDuration.MaxStreamDuration = d
} else {
action.MaxStreamDuration = &route.RouteAction_MaxStreamDuration{
MaxStreamDuration: d,
}
}
} else {
// nolint: staticcheck
action.MaxGrpcTimeout = d
}
out.Action = &route.Route_Route{Route: action}
if rewrite := in.Rewrite; rewrite != nil {
action.PrefixRewrite = rewrite.Uri
action.HostRewriteSpecifier = &route.RouteAction_HostRewriteLiteral{
HostRewriteLiteral: rewrite.Authority,
}
}
if in.Mirror != nil {
if mp := mirrorPercent(in); mp != nil {
action.RequestMirrorPolicies = []*route.RouteAction_RequestMirrorPolicy{{
Cluster: GetDestinationCluster(in.Mirror, serviceRegistry[host.Name(in.Mirror.Host)], port),
RuntimeFraction: mp,
TraceSampled: &wrappers.BoolValue{Value: false},
}}
}
}
// TODO: eliminate this logic and use the total_weight option in envoy route
weighted := make([]*route.WeightedCluster_ClusterWeight, 0)
for _, dst := range in.Route {
weight := &wrappers.UInt32Value{Value: uint32(dst.Weight)}
if dst.Weight == 0 {
// Ignore 0 weighted clusters if there are other clusters in the route.
// But if this is the only cluster in the route, then add it as a cluster with weight 100
if len(in.Route) == 1 {
weight.Value = uint32(100)
} else {
continue
}
}
operations := translateHeadersOperations(dst.Headers)
hostname := host.Name(dst.GetDestination().GetHost())
n := GetDestinationCluster(dst.Destination, serviceRegistry[hostname], port)
clusterWeight := &route.WeightedCluster_ClusterWeight{
Name: n,
Weight: weight,
RequestHeadersToAdd: operations.requestHeadersToAdd,
RequestHeadersToRemove: operations.requestHeadersToRemove,
ResponseHeadersToAdd: operations.responseHeadersToAdd,
ResponseHeadersToRemove: operations.responseHeadersToRemove,
}
weighted = append(weighted, clusterWeight)
var configNamespace string
if serviceRegistry[hostname] != nil {
configNamespace = serviceRegistry[hostname].Attributes.Namespace
}
hashPolicy := getHashPolicy(push, node, dst, configNamespace)
if hashPolicy != nil {
action.HashPolicy = append(action.HashPolicy, hashPolicy)
}
}
// rewrite to a single cluster if there is only weighted cluster
if len(weighted) == 1 {
action.ClusterSpecifier = &route.RouteAction_Cluster{Cluster: weighted[0].Name}
out.RequestHeadersToAdd = append(out.RequestHeadersToAdd, weighted[0].RequestHeadersToAdd...)
out.RequestHeadersToRemove = append(out.RequestHeadersToRemove, weighted[0].RequestHeadersToRemove...)
out.ResponseHeadersToAdd = append(out.ResponseHeadersToAdd, weighted[0].ResponseHeadersToAdd...)
out.ResponseHeadersToRemove = append(out.ResponseHeadersToRemove, weighted[0].ResponseHeadersToRemove...)
} else {
action.ClusterSpecifier = &route.RouteAction_WeightedClusters{
WeightedClusters: &route.WeightedCluster{
Clusters: weighted,
},
}
}
}
out.Decorator = &route.Decorator{
Operation: getRouteOperation(out, virtualService.Name, port),
}
if fault := in.Fault; fault != nil {
out.TypedPerFilterConfig[wellknown.Fault] = util.MessageToAny(translateFault(in.Fault))
}
return out
}
// SortHeaderValueOption type and the functions below (Len, Less and Swap) are for sort.Stable for type HeaderValueOption
type SortHeaderValueOption []*core.HeaderValueOption
// mirrorPercent computes the mirror percent to be used based on "Mirror" data in route.
func mirrorPercent(in *networking.HTTPRoute) *core.RuntimeFractionalPercent {
switch {
case in.MirrorPercentage != nil:
if in.MirrorPercentage.GetValue() > 0 {
return &core.RuntimeFractionalPercent{
DefaultValue: translatePercentToFractionalPercent(in.MirrorPercentage),
}
}
// If zero percent is provided explicitly, we should not mirror.
return nil
case in.MirrorPercent != nil:
if in.MirrorPercent.GetValue() > 0 {
return &core.RuntimeFractionalPercent{
DefaultValue: translateIntegerToFractionalPercent((int32(in.MirrorPercent.GetValue()))),
}
}
// If zero percent is provided explicitly, we should not mirror.
return nil
default:
// Default to 100 percent if percent is not given.
return &core.RuntimeFractionalPercent{
DefaultValue: translateIntegerToFractionalPercent(100),
}
}
}
// Len is in the sort.Interface for SortHeaderValueOption
func (b SortHeaderValueOption) Len() int {
return len(b)
}
// Less is in the sort.Interface for SortHeaderValueOption
func (b SortHeaderValueOption) Less(i, j int) bool {
if b[i] == nil || b[i].Header == nil {
return false
} else if b[j] == nil || b[j].Header == nil
|
return strings.Compare(b[i].Header.Key, b[j].Header.Key) < 0
}
// Swap is in the sort.Interface for SortHeaderValueOption
func (b SortHeaderValueOption) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}
// translateAppendHeaders converts a header name/value map into Envoy HeaderValueOptions,
// marking each entry with the given append flag and sorting them for deterministic output.
func translateAppendHeaders(headers map[string]string, appendFlag bool) []*core.HeaderValueOption {
if len(headers) == 0 {
return nil
}
headerValueOptionList := make([]*core.HeaderValueOption, 0, len(headers))
for key, value := range headers {
headerValueOptionList = append(headerValueOptionList, &core.HeaderValueOption{
Header: &core.HeaderValue{
Key: key,
Value: value,
},
Append: &wrappers.BoolValue{Value: appendFlag},
})
}
sort.Stable(SortHeaderValueOption(headerValueOptionList))
return headerValueOptionList
}
type headersOperations struct {
requestHeadersToAdd []*core.HeaderValueOption
responseHeadersToAdd []*core.HeaderValueOption
requestHeadersToRemove []string
responseHeadersToRemove []string
}
// translateHeadersOperations translates the virtual service Headers spec into Envoy header
// operations: "set" entries overwrite existing headers, "add" entries append to them, and
// the remove lists are copied as-is.
func translateHeadersOperations(headers *networking.Headers) headersOperations {
req := headers.GetRequest()
resp := headers.GetResponse()
requestHeadersToAdd := translateAppendHeaders(req.GetSet(), false)
requestHeadersToAdd = append(requestHeadersToAdd, translateAppendHeaders(req.GetAdd(), true)...)
responseHeadersToAdd := translateAppendHeaders(resp.GetSet(), false)
responseHeadersToAdd = append(responseHeadersToAdd, translateAppendHeaders(resp.GetAdd(), true)...)
return headersOperations{
requestHeadersToAdd: requestHeadersToAdd,
responseHeadersToAdd: responseHeadersToAdd,
requestHeadersToRemove: append([]string{}, req.GetRemove()...), // copy slice
responseHeadersToRemove: append([]string{}, resp.GetRemove()...),
}
}
// translateRouteMatch translates an HTTPMatchRequest into an Envoy RouteMatch.
// A nil match yields a catch-all prefix "/" match.
func translateRouteMatch(in *networking.HTTPMatchRequest, node *model.Proxy) *route.RouteMatch {
out := &route.RouteMatch{PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"}}
if in == nil {
return out
}
for name, stringMatch := range in.Headers {
matcher := translateHeaderMatch(name, stringMatch, node)
out.Headers = append(out.Headers, matcher)
}
for name, stringMatch := range in.WithoutHeaders {
matcher := translateHeaderMatch(name, stringMatch, node)
matcher.InvertMatch = true
out.Headers = append(out.Headers, matcher)
}
// guarantee ordering of headers
sort.Slice(out.Headers, func(i, j int) bool {
return out.Headers[i].Name < out.Headers[j].Name
})
if in.Uri != nil {
switch m := in.Uri.MatchType.(type) {
case *networking.StringMatch_Exact:
out.PathSpecifier = &route.RouteMatch_Path{Path: m.Exact}
case *networking.StringMatch_Prefix:
out.PathSpecifier = &route.RouteMatch_Prefix{Prefix: m.Prefix}
case *networking.StringMatch_Regex:
out.PathSpecifier = &route.RouteMatch_SafeRegex{
SafeRegex: &matcher.RegexMatcher{
// nolint: staticcheck
EngineType: regexMatcher(node),
Regex: m.Regex,
},
}
}
}
out.CaseSensitive = &wrappers.BoolValue{Value: !in.IgnoreUriCase}
if in.Method != nil {
matcher := translateHeaderMatch(HeaderMethod, in.Method, node)
out.Headers = append(out.Headers, matcher)
}
if in.Authority != nil {
matcher := translateHeaderMatch(HeaderAuthority, in.Authority, node)
out.Headers = append(out.Headers, matcher)
}
if in.Scheme != nil {
matcher := translateHeaderMatch(HeaderScheme, in.Scheme, node)
out.Headers = append(out.Headers, matcher)
}
for name, stringMatch := range in.QueryParams {
matcher := translateQueryParamMatch(name, stringMatch, node)
out.QueryParameters = append(out.QueryParameters, matcher)
}
return out
}
// translateQueryParamMatch translates a StringMatch to a QueryParameterMatcher.
func translateQueryParamMatch(name string, in *networking.StringMatch, node *model.Proxy) *route.QueryParameterMatcher {
out := &route.QueryParameterMatcher{
Name: name,
}
switch m := in.MatchType.(type) {
case *networking.StringMatch_Exact:
out.QueryParameterMatchSpecifier = &route.QueryParameterMatcher_StringMatch{
StringMatch: &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Exact{Exact: m.Exact}},
}
case *networking.StringMatch_Regex:
out.QueryParameterMatchSpecifier = &route.QueryParameterMatcher_StringMatch{
StringMatch: &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_SafeRegex{
SafeRegex: &matcher.RegexMatcher{
EngineType: regexMatcher(node),
Regex: m.Regex,
},
},
}}
}
return out
}
// isCatchAllHeaderMatch determines if the given header is matched with all strings or not.
// Currently it returns true if the match is nil or the regex value is "*".
func isCatchAllHeaderMatch(in *networking.StringMatch) bool {
catchall := false
if in == nil {
return true
}
switch m := in.MatchType.(type) {
case *networking.StringMatch_Regex:
catchall = m.Regex == "*"
}
return catchall
}
// translateHeaderMatch translates to HeaderMatcher
func translateHeaderMatch(name string, in *networking.StringMatch, node *model.Proxy) *route.HeaderMatcher {
out := &route.HeaderMatcher{
Name: name,
}
if isCatchAllHeaderMatch(in) {
out.HeaderMatchSpecifier = &route.HeaderMatcher_PresentMatch{PresentMatch: true}
return out
}
switch m := in.MatchType.(type) {
case *networking.StringMatch_Exact:
out.HeaderMatchSpecifier = &route.HeaderMatcher_ExactMatch{ExactMatch: m.Exact}
case *networking.StringMatch_Prefix:
// Envoy regex grammar is RE2 (https://github.com/google/re2/wiki/Syntax)
// Golang has a slightly different regex grammar
out.HeaderMatchSpecifier = &route.HeaderMatcher_PrefixMatch{PrefixMatch: m.Prefix}
case *networking.StringMatch_Regex:
out.HeaderMatchSpecifier = &route.HeaderMatcher_SafeRegexMatch{
SafeRegexMatch: &matcher.RegexMatcher{
EngineType: regexMatcher(node),
Regex: m.Regex,
},
}
}
return out
}
func convertToExactEnvoyMatch(in []string) []*matcher.StringMatcher {
res := make([]*matcher.StringMatcher, 0, len(in))
for _, istioMatcher := range in {
res = append(res, &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Exact{Exact: istioMatcher}})
}
return res
}
func convertToEnvoyMatch(in []*networking.StringMatch, node *model.Proxy) []*matcher.StringMatcher {
res := make([]*matcher.StringMatcher, 0, len(in))
for _, istioMatcher := range in {
switch m := istioMatcher.MatchType.(type) {
case *networking.StringMatch_Exact:
res = append(res, &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Exact{Exact: m.Exact}})
case *networking.StringMatch_Prefix:
res = append(res, &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Prefix{Prefix: m.Prefix}})
case *networking.StringMatch_Regex:
res = append(res, &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_SafeRegex{
SafeRegex: &matcher.RegexMatcher{
EngineType: regexMatcher(node),
Regex: m.Regex,
},
},
})
}
}
return res
}
// translateCORSPolicy translates CORS policy
func translateCORSPolicy(in *networking.CorsPolicy, node *model.Proxy) *route.CorsPolicy {
if in == nil {
return nil
}
// CORS filter is enabled by default
out := route.CorsPolicy{}
if in.AllowOrigins != nil {
out.AllowOriginStringMatch = convertToEnvoyMatch(in.AllowOrigins, node)
} else if in.AllowOrigin != nil {
out.AllowOriginStringMatch = convertToExactEnvoyMatch(in.AllowOrigin)
}
out.EnabledSpecifier = &route.CorsPolicy_FilterEnabled{
FilterEnabled: &core.RuntimeFractionalPercent{
DefaultValue: &xdstype.FractionalPercent{
Numerator: 100,
Denominator: xdstype.FractionalPercent_HUNDRED,
},
},
}
out.AllowCredentials = gogo.BoolToProtoBool(in.AllowCredentials)
out.AllowHeaders = strings.Join(in.AllowHeaders, ",")
out.AllowMethods = strings.Join(in.AllowMethods, ",")
out.ExposeHeaders = strings.Join(in.ExposeHeaders, ",")
if in.MaxAge != nil {
out.MaxAge = strconv.FormatInt(in.MaxAge.GetSeconds(), 10)
}
return &out
}
// getRouteOperation returns readable route description for trace.
func getRouteOperation(in *route.Route, vsName string, port int) string {
path := "/*"
m := in.GetMatch()
ps := m.GetPathSpecifier()
if ps != nil {
switch ps.(type) {
case *route.RouteMatch_Prefix:
path = m.GetPrefix() + "*"
case *route.RouteMatch_Path:
path = m.GetPath()
case *route.RouteMatch_SafeRegex:
path = m.GetSafeRegex().GetRegex()
}
}
// If there is only one destination cluster in route, return host:port/uri as description of route.
// Otherwise there are multiple destination clusters and destination host is not clear. For that case
	// return virtual service name:port/uri as substitute.
if c := in.GetRoute().GetCluster(); model.IsValidSubsetKey(c) {
// Parse host and port from cluster name.
_, _, h, p := model.ParseSubsetKey(c)
return string(h) + ":" + strconv.Itoa(p) + path
}
return vsName + ":" + strconv.Itoa(port) + path
}
// BuildDefaultHTTPInboundRoute builds a default inbound route.
func BuildDefaultHTTPInboundRoute(node *model.Proxy, clusterName string, operation string) *route.Route {
notimeout := ptypes.DurationProto(0)
routeAction := &route.RouteAction{
ClusterSpecifier: &route.RouteAction_Cluster{Cluster: clusterName},
Timeout: notimeout,
}
if util.IsIstioVersionGE18(node) {
routeAction.MaxStreamDuration = &route.RouteAction_MaxStreamDuration{
// If not configured at all, the grpc-timeout header is not used and
// gRPC requests time out like any other requests using timeout or its default.
MaxStreamDuration: notimeout,
}
} else {
// nolint: staticcheck
routeAction.MaxGrpcTimeout = notimeout
}
val := &route.Route{
Match: translateRouteMatch(nil, node),
Decorator: &route.Decorator{
Operation: operation,
},
Action: &route.Route_Route{
Route: routeAction,
},
}
val.Name = DefaultRouteName
return val
}
// BuildDefaultHTTPOutboundRoute builds a default outbound route, including a retry policy.
func BuildDefaultHTTPOutboundRoute(node *model.Proxy, clusterName string, operation string) *route.Route {
// Start with the same configuration as for inbound.
out := BuildDefaultHTTPInboundRoute(node, clusterName, operation)
// Add a default retry policy for outbound routes.
out.GetRoute().RetryPolicy = retry.DefaultPolicy()
return out
}
// translatePercentToFractionalPercent translates an v1alpha3 Percent instance
// to an envoy.type.FractionalPercent instance.
func translatePercentToFractionalPercent(p *networking.Percent) *xdstype.FractionalPercent {
return &xdstype.FractionalPercent{
Numerator: uint32(p.Value * 10000),
Denominator: xdstype.FractionalPercent_MILLION,
}
}
// translateIntegerToFractionalPercent translates an int32 instance to an
// envoy.type.FractionalPercent instance.
func translateIntegerToFractionalPercent(p int32) *xdstype.FractionalPercent {
return &xdstype.FractionalPercent{
Numerator: uint32(p),
Denominator: xdstype.FractionalPercent_HUNDRED,
}
}
// translateFault translates networking.HTTPFaultInjection into Envoy's HTTPFault
func translateFault(in *networking.HTTPFaultInjection) *xdshttpfault.HTTPFault {
if in == nil {
return nil
}
out := xdshttpfault.HTTPFault{}
if in.Delay != nil {
out.Delay = &xdsfault.FaultDelay{}
if in.Delay.Percentage != nil {
out.Delay.Percentage = translatePercentToFractionalPercent(in.Delay.Percentage)
} else {
out.Delay.Percentage = translateIntegerToFractionalPercent(in.Delay.Percent)
}
switch d := in.Delay.HttpDelayType.(type) {
case *networking.HTTPFaultInjection_Delay_FixedDelay:
out.Delay.FaultDelaySecifier = &xdsfault.FaultDelay_FixedDelay{
FixedDelay: gogo.DurationToProtoDuration(d.FixedDelay),
}
default:
log.Warnf("Exponential faults are not yet supported")
out.Delay = nil
}
}
if in.Abort != nil {
out.Abort = &xdshttpfault.FaultAbort{}
if in.Abort.Percentage != nil {
out.Abort.Percentage = translatePercentToFractionalPercent(in.Abort.Percentage)
}
switch a := in.Abort.ErrorType.(type) {
case *networking.HTTPFaultInjection_Abort_HttpStatus:
out.Abort.ErrorType = &xdshttpfault.FaultAbort_HttpStatus{
HttpStatus: uint32(a.HttpStatus),
}
default:
log.Warnf("Non-HTTP type abort faults are not yet supported")
out.Abort = nil
}
}
if out.Delay == nil && out.Abort == nil {
return nil
}
return &out
}
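// portLevelSettingsConsistentHash returns the consistent-hash settings configured for the
// destination's port in the given port-level traffic policies, or nil if the destination
// has no port or no matching entry exists.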
func portLevelSettingsConsistentHash(dst *networking.Destination,
pls []*networking.TrafficPolicy_PortTrafficPolicy) *networking.LoadBalancerSettings_ConsistentHashLB {
if dst.Port != nil {
portNumber := dst.GetPort().GetNumber()
for _, setting := range pls {
number := setting.GetPort().GetNumber()
if number == portNumber {
return setting.GetLoadBalancer().GetConsistentHash()
}
}
}
return nil
}
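// consistentHashToHashPolicy maps an Istio ConsistentHashLB setting onto the corresponding
// Envoy hash policy (HTTP header, cookie, source IP or query parameter), returning nil when
// no hash key is set.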
func consistentHashToHashPolicy(consistentHash *networking.LoadBalancerSettings_ConsistentHashLB) *route.RouteAction_HashPolicy {
switch consistentHash.GetHashKey().(type) {
case *networking.LoadBalancerSettings_ConsistentHashLB_HttpHeaderName:
return &route.RouteAction_HashPolicy{
PolicySpecifier: &route.RouteAction_HashPolicy_Header_{
Header: &route.RouteAction_HashPolicy_Header{
HeaderName: consistentHash.GetHttpHeaderName(),
},
},
}
case *networking.LoadBalancerSettings_ConsistentHashLB_HttpCookie:
cookie := consistentHash.GetHttpCookie()
var ttl *duration.Duration
if cookie.GetTtl() != nil {
ttl = gogo.DurationToProtoDuration(cookie.GetTtl())
}
return &route.RouteAction_HashPolicy{
PolicySpecifier: &route.RouteAction_HashPolicy_Cookie_{
Cookie: &route.RouteAction_HashPolicy_Cookie{
Name: cookie.GetName(),
Ttl: ttl,
Path: cookie.GetPath(),
},
},
}
case *networking.LoadBalancerSettings_ConsistentHashLB_UseSourceIp:
return &route.RouteAction_HashPolicy{
PolicySpecifier: &route.RouteAction_HashPolicy_ConnectionProperties_{
ConnectionProperties: &route.RouteAction_HashPolicy_ConnectionProperties{
SourceIp: consistentHash.GetUseSourceIp(),
},
},
}
case *networking.LoadBalancerSettings_ConsistentHashLB_HttpQueryParameterName:
return &route.RouteAction_HashPolicy{
PolicySpecifier: &route.RouteAction_HashPolicy_QueryParameter_{
QueryParameter: &route.RouteAction_HashPolicy_QueryParameter{
Name: consistentHash.GetHttpQueryParameterName(),
},
},
}
}
return nil
}
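// getHashPolicyByService derives the hash policy for a service and port from its
// DestinationRule, preferring port-level consistent-hash settings over the rule-wide setting.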
func getHashPolicyByService(node *model.Proxy, push *model.PushContext, svc *model.Service, port *model.Port) *route.RouteAction_HashPolicy {
if push == nil {
return nil
}
destinationRule := push.DestinationRule(node, svc)
if destinationRule == nil {
return nil
}
rule := destinationRule.Spec.(*networking.DestinationRule)
consistentHash := rule.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash()
portLevelSettings := rule.GetTrafficPolicy().GetPortLevelSettings()
for _, setting := range portLevelSettings {
number := setting.GetPort().GetNumber()
if int(number) == port.Port {
consistentHash = setting.GetLoadBalancer().GetConsistentHash()
break
}
}
return consistentHashToHashPolicy(consistentHash)
}
func getHashPolicy(push *model.PushContext, node *model.Proxy, dst *networking.HTTPRouteDestination,
configNamespace string) *route.RouteAction_HashPolicy {
if push == nil {
return nil
}
destination := dst.GetDestination()
destinationRule := push.DestinationRule(node,
&model.Service{
Hostname: host.Name(destination.Host),
Attributes: model.ServiceAttributes{Namespace: configNamespace},
})
if destinationRule == nil {
return nil
}
rule := destinationRule.Spec.(*networking.DestinationRule)
consistentHash := rule.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash()
portLevelSettings := rule.GetTrafficPolicy().GetPortLevelSettings()
plsHash := portLevelSettingsConsistentHash(destination, portLevelSettings)
var subsetHash, subsetPLSHash *networking.LoadBalancerSettings_ConsistentHashLB
for _, subset := range rule.GetSubsets() {
if subset.GetName() == destination.GetSubset() {
subsetPortLevelSettings := subset.GetTrafficPolicy().GetPortLevelSettings()
subsetHash = subset.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash()
subsetPLSHash = portLevelSettingsConsistentHash(destination, subsetPortLevelSettings)
break
}
}
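	// Precedence, most specific first: subset port-level settings, then subset-level
	// settings, then destination-rule port-level settings, then the rule-wide setting.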
switch {
case subsetPLSHash != nil:
consistentHash = subsetPLSHash
case subsetHash != nil:
consistentHash = subsetHash
case plsHash != nil:
consistentHash = plsHash
}
return consistentHashToHashPolicy(consistentHash)
}
// isCatchAllMatch returns true if the HTTPMatchRequest is a catchall match, otherwise
// false. Note - this may not be exactly "catch all" as we don't know the full
// class of possible inputs. As such, this is used only for optimization.
func isCatchAllMatch(m *networking.HTTPMatchRequest) bool {
catchall := false
if m.Uri != nil {
switch m := m.Uri.MatchType.(type) {
case *networking.StringMatch_Prefix:
catchall = m.Prefix == "/"
case *networking.StringMatch_Regex:
catchall = m.Regex == "*"
}
}
// A Match is catch all if and only if it has no match set
// and URI has a prefix / or regex *.
return catchall &&
len(m.Headers) == 0 &&
len(m.QueryParams) == 0 &&
len(m.SourceLabels) == 0 &&
len(m.WithoutHeaders) == 0 &&
len(m.Gateways) == 0 &&
m.Method == nil &&
m.Scheme == nil &&
m.Port == 0 &&
m.Authority == nil &&
m.SourceNamespace == ""
}
// CombineVHostRoutes semi-concatenates the given vhosts' routes into a single route set.
// Moves the catch all routes alone to the end, while retaining
// the relative order of other routes in the concatenated route.
// Assumes that the virtual services that generated first and second are ordered by
// time.
func CombineVHostRoutes(routeSets ...[]*route.Route) []*route.Route {
l := 0
for _, rs := range routeSets {
l += len(rs)
}
allroutes := make([]*route.Route, 0, l)
catchAllRoutes := make([]*route.Route, 0)
for _, routes := range routeSets {
for _, r := range routes {
if isCatchAllRoute(r) {
catchAllRoutes = append(catchAllRoutes, r)
} else {
allroutes = append(allroutes, r)
}
}
}
return append(allroutes, catchAllRoutes...)
}
// isCatchAllRoute returns true if an Envoy route is a catchall route otherwise false.
func isCatchAllRoute(r *route.Route) bool {
catchall := false
switch ir := r.Match.PathSpecifier.(type) {
case *route.RouteMatch_Prefix:
catchall = ir.Prefix == "/"
case *route.RouteMatch_SafeRegex:
catchall = ir.SafeRegex.GetRegex() == "*"
}
// A Match is catch all if and only if it has no header/query param match
// and URI has a prefix / or regex *.
return catchall && len(r.Match.Headers) == 0 && len(r.Match.QueryParameters) == 0
}
func traceOperation(host string, port int) string {
// Format : "%s:%d/*"
return host + ":" + strconv.Itoa(port) + "/*"
}
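// regexMatcher returns the RE2 engine configuration to use for this proxy: the unbounded
// engine for Istio >= 1.8, otherwise one capped at maxRegExProgramSize.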
func regexMatcher(node *model.Proxy) *matcher.RegexMatcher_GoogleRe2 {
if util.IsIstioVersionGE18(node) {
return regexEngine
}
return regexEngineWithMaxProgramSize
}
|
{
return true
}
|
jar_test.go
|
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cookiejar
import (
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"testing"
"time"
)
// tNow is the synthetic current time used as now during testing.
var tNow = time.Date(2013, 1, 1, 12, 0, 0, 0, time.UTC)
// testPSL implements PublicSuffixList with just two rules: "co.uk"
// and the default rule "*".
type testPSL struct{}
func (testPSL) String() string {
return "testPSL"
}
func (testPSL) PublicSuffix(d string) string {
if d == "co.uk" || strings.HasSuffix(d, ".co.uk") {
return "co.uk"
}
return d[strings.LastIndex(d, ".")+1:]
}
// newTestJar creates an empty Jar with testPSL as the public suffix list.
func newTestJar() *Jar {
jar, err := New(&Options{PublicSuffixList: testPSL{}})
if err != nil {
panic(err)
}
return jar
}
var hasDotSuffixTests = [...]struct {
s, suffix string
}{
{"", ""},
{"", "."},
{"", "x"},
{".", ""},
{".", "."},
{".", ".."},
{".", "x"},
{".", "x."},
{".", ".x"},
{".", ".x."},
{"x", ""},
{"x", "."},
{"x", ".."},
{"x", "x"},
{"x", "x."},
{"x", ".x"},
{"x", ".x."},
{".x", ""},
{".x", "."},
{".x", ".."},
{".x", "x"},
{".x", "x."},
{".x", ".x"},
{".x", ".x."},
{"x.", ""},
{"x.", "."},
{"x.", ".."},
{"x.", "x"},
{"x.", "x."},
{"x.", ".x"},
{"x.", ".x."},
{"com", ""},
{"com", "m"},
{"com", "om"},
{"com", "com"},
{"com", ".com"},
{"com", "x.com"},
{"com", "xcom"},
{"com", "xorg"},
{"com", "org"},
{"com", "rg"},
{"foo.com", ""},
{"foo.com", "m"},
{"foo.com", "om"},
{"foo.com", "com"},
{"foo.com", ".com"},
{"foo.com", "o.com"},
{"foo.com", "oo.com"},
{"foo.com", "foo.com"},
{"foo.com", ".foo.com"},
{"foo.com", "x.foo.com"},
{"foo.com", "xfoo.com"},
{"foo.com", "xfoo.org"},
{"foo.com", "foo.org"},
{"foo.com", "oo.org"},
{"foo.com", "o.org"},
{"foo.com", ".org"},
{"foo.com", "org"},
{"foo.com", "rg"},
}
func TestHasDotSuffix(t *testing.T) {
for _, tc := range hasDotSuffixTests {
got := hasDotSuffix(tc.s, tc.suffix)
want := strings.HasSuffix(tc.s, "."+tc.suffix)
if got != want {
t.Errorf("s=%q, suffix=%q: got %v, want %v", tc.s, tc.suffix, got, want)
}
}
}
var canonicalHostTests = map[string]string{
"www.example.com": "www.example.com",
"WWW.EXAMPLE.COM": "www.example.com",
"wWw.eXAmple.CoM": "www.example.com",
"www.example.com:80": "www.example.com",
"192.168.0.10": "192.168.0.10",
"192.168.0.5:8080": "192.168.0.5",
"2001:4860:0:2001::68": "2001:4860:0:2001::68",
"[2001:4860:0:::68]:8080": "2001:4860:0:::68",
"www.bücher.de": "www.xn--bcher-kva.de",
"www.example.com.": "www.example.com",
// TODO: Fix canonicalHost so that all of the following malformed
// domain names trigger an error. (This list is not exhaustive, e.g.
// malformed internationalized domain names are missing.)
".": "",
"..": ".",
"...": "..",
".net": ".net",
".net.": ".net",
"a..": "a.",
"b.a..": "b.a.",
"weird.stuff...": "weird.stuff..",
"[bad.unmatched.bracket:": "error",
}
func TestCanonicalHost(t *testing.T) {
for h, want := range canonicalHostTests {
got, err := canonicalHost(h)
if want == "error" {
if err == nil {
t.Errorf("%q: got %q and nil error, want non-nil", h, got)
}
continue
}
if err != nil {
t.Errorf("%q: %v", h, err)
continue
}
if got != want {
t.Errorf("%q: got %q, want %q", h, got, want)
continue
}
}
}
var hasPortTests = map[string]bool{
"www.example.com": false,
"www.example.com:80": true,
"127.0.0.1": false,
"127.0.0.1:8080": true,
"2001:4860:0:2001::68": false,
"[2001::0:::68]:80": true,
}
func TestHasPort(t *testing.T) {
for host, want := range hasPortTests {
if got := hasPort(host); got != want {
t.Errorf("%q: got %t, want %t", host, got, want)
}
}
}
var jarKeyTests = map[string]string{
"foo.www.example.com": "example.com",
"www.example.com": "example.com",
"example.com": "example.com",
"com": "com",
"foo.www.bbc.co.uk": "bbc.co.uk",
"www.bbc.co.uk": "bbc.co.uk",
"bbc.co.uk": "bbc.co.uk",
"co.uk": "co.uk",
"uk": "uk",
"192.168.0.5": "192.168.0.5",
// The following are actual outputs of canonicalHost for
// malformed inputs to canonicalHost (see above).
"": "",
".": ".",
"..": ".",
".net": ".net",
"a.": "a.",
"b.a.": "a.",
"weird.stuff..": ".",
}
func TestJarKey(t *testing.T) {
for host, want := range jarKeyTests {
if got := jarKey(host, testPSL{}); got != want {
t.Errorf("%q: got %q, want %q", host, got, want)
}
}
}
var jarKeyNilPSLTests = map[string]string{
"foo.www.example.com": "example.com",
"www.example.com": "example.com",
"example.com": "example.com",
"com": "com",
"foo.www.bbc.co.uk": "co.uk",
"www.bbc.co.uk": "co.uk",
"bbc.co.uk": "co.uk",
"co.uk": "co.uk",
"uk": "uk",
"192.168.0.5": "192.168.0.5",
// The following are actual outputs of canonicalHost for
// malformed inputs to canonicalHost.
"": "",
".": ".",
"..": "..",
".net": ".net",
"a.": "a.",
"b.a.": "a.",
"weird.stuff..": "stuff..",
}
func TestJarKeyNilPSL(t *testing.T) {
for host, want := range jarKeyNilPSLTests {
|
}
}
}
var isIPTests = map[string]bool{
"127.0.0.1": true,
"1.2.3.4": true,
"2001:4860:0:2001::68": true,
"example.com": false,
"1.1.1.300": false,
"www.foo.bar.net": false,
"123.foo.bar.net": false,
}
func TestIsIP(t *testing.T) {
for host, want := range isIPTests {
if got := isIP(host); got != want {
t.Errorf("%q: got %t, want %t", host, got, want)
}
}
}
var defaultPathTests = map[string]string{
"/": "/",
"/abc": "/",
"/abc/": "/abc",
"/abc/xyz": "/abc",
"/abc/xyz/": "/abc/xyz",
"/a/b/c.html": "/a/b",
"": "/",
"strange": "/",
"//": "/",
"/a//b": "/a/",
"/a/./b": "/a/.",
"/a/../b": "/a/..",
}
func TestDefaultPath(t *testing.T) {
for path, want := range defaultPathTests {
if got := defaultPath(path); got != want {
t.Errorf("%q: got %q, want %q", path, got, want)
}
}
}
var domainAndTypeTests = [...]struct {
host string // host Set-Cookie header was received from
domain string // domain attribute in Set-Cookie header
wantDomain string // expected domain of cookie
wantHostOnly bool // expected host-cookie flag
wantErr error // expected error
}{
{"www.example.com", "", "www.example.com", true, nil},
{"127.0.0.1", "", "127.0.0.1", true, nil},
{"2001:4860:0:2001::68", "", "2001:4860:0:2001::68", true, nil},
{"www.example.com", "example.com", "example.com", false, nil},
{"www.example.com", ".example.com", "example.com", false, nil},
{"www.example.com", "www.example.com", "www.example.com", false, nil},
{"www.example.com", ".www.example.com", "www.example.com", false, nil},
{"foo.sso.example.com", "sso.example.com", "sso.example.com", false, nil},
{"bar.co.uk", "bar.co.uk", "bar.co.uk", false, nil},
{"foo.bar.co.uk", ".bar.co.uk", "bar.co.uk", false, nil},
{"127.0.0.1", "127.0.0.1", "", false, errNoHostname},
{"2001:4860:0:2001::68", "2001:4860:0:2001::68", "2001:4860:0:2001::68", false, errNoHostname},
{"www.example.com", ".", "", false, errMalformedDomain},
{"www.example.com", "..", "", false, errMalformedDomain},
{"www.example.com", "other.com", "", false, errIllegalDomain},
{"www.example.com", "com", "", false, errIllegalDomain},
{"www.example.com", ".com", "", false, errIllegalDomain},
{"foo.bar.co.uk", ".co.uk", "", false, errIllegalDomain},
{"127.www.0.0.1", "127.0.0.1", "", false, errIllegalDomain},
{"com", "", "com", true, nil},
{"com", "com", "com", true, nil},
{"com", ".com", "com", true, nil},
{"co.uk", "", "co.uk", true, nil},
{"co.uk", "co.uk", "co.uk", true, nil},
{"co.uk", ".co.uk", "co.uk", true, nil},
}
func TestDomainAndType(t *testing.T) {
jar := newTestJar()
for _, tc := range domainAndTypeTests {
domain, hostOnly, err := jar.domainAndType(tc.host, tc.domain)
if err != tc.wantErr {
t.Errorf("%q/%q: got %q error, want %q",
tc.host, tc.domain, err, tc.wantErr)
continue
}
if err != nil {
continue
}
if domain != tc.wantDomain || hostOnly != tc.wantHostOnly {
t.Errorf("%q/%q: got %q/%t want %q/%t",
tc.host, tc.domain, domain, hostOnly,
tc.wantDomain, tc.wantHostOnly)
}
}
}
// expiresIn creates an expires attribute delta seconds from tNow.
func expiresIn(delta int) string {
t := tNow.Add(time.Duration(delta) * time.Second)
return "expires=" + t.Format(time.RFC1123)
}
// mustParseURL parses s to an URL and panics on error.
func mustParseURL(s string) *url.URL {
u, err := url.Parse(s)
if err != nil || u.Scheme == "" || u.Host == "" {
panic(fmt.Sprintf("Unable to parse URL %s.", s))
}
return u
}
// jarTest encapsulates the following actions on a jar:
// 1. Perform SetCookies with fromURL and the cookies from setCookies.
// (Done at time tNow + 0 ms.)
// 2. Check that the entries in the jar match content.
// (Done at time tNow + 1001 ms.)
// 3. For each query in tests: Check that Cookies with toURL yields the
// cookies in want.
// (Query n done at tNow + (n+2)*1001 ms.)
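// Illustrative timeline (derived from the steps above) for a jarTest with two
// queries: SetCookies at tNow, the content check at tNow+1001ms, query #0 at
// tNow+2002ms and query #1 at tNow+3003ms, so max-age/expires values of a few
// seconds can expire between individual queries.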
type jarTest struct {
description string // The description of what this test is supposed to test
fromURL string // The full URL of the request from which Set-Cookie headers were received
setCookies []string // All the cookies received from fromURL
content string // The whole (non-expired) content of the jar
queries []query // Queries to test the Jar.Cookies method
}
// query contains one test of the cookies returned from Jar.Cookies.
type query struct {
toURL string // the URL in the Cookies call
want string // the expected list of cookies (order matters)
}
// run runs the jarTest.
func (test jarTest) run(t *testing.T, jar *Jar) {
now := tNow
// Populate jar with cookies.
setCookies := make([]*http.Cookie, len(test.setCookies))
for i, cs := range test.setCookies {
cookies := (&http.Response{Header: http.Header{"Set-Cookie": {cs}}}).Cookies()
if len(cookies) != 1 {
panic(fmt.Sprintf("Wrong cookie line %q: %#v", cs, cookies))
}
setCookies[i] = cookies[0]
}
jar.setCookies(mustParseURL(test.fromURL), setCookies, now)
now = now.Add(1001 * time.Millisecond)
// Serialize non-expired entries in the form "name1=val1 name2=val2".
var cs []string
for _, submap := range jar.entries {
for _, cookie := range submap {
if !cookie.Expires.After(now) {
continue
}
cs = append(cs, cookie.Name+"="+cookie.Value)
}
}
sort.Strings(cs)
got := strings.Join(cs, " ")
// Make sure jar content matches our expectations.
if got != test.content {
t.Errorf("Test %q Content\ngot %q\nwant %q",
test.description, got, test.content)
}
// Test different calls to Cookies.
for i, query := range test.queries {
now = now.Add(1001 * time.Millisecond)
var s []string
for _, c := range jar.cookies(mustParseURL(query.toURL), now) {
s = append(s, c.Name+"="+c.Value)
}
if got := strings.Join(s, " "); got != query.want {
t.Errorf("Test %q #%d\ngot %q\nwant %q", test.description, i, got, query.want)
}
}
}
// basicsTests contains fundamental tests. Each jarTest has to be performed on
// a fresh, empty Jar.
var basicsTests = [...]jarTest{
{
"Retrieval of a plain host cookie.",
"http://www.host.test/",
[]string{"A=a"},
"A=a",
[]query{
{"http://www.host.test", "A=a"},
{"http://www.host.test/", "A=a"},
{"http://www.host.test/some/path", "A=a"},
{"https://www.host.test", "A=a"},
{"https://www.host.test/", "A=a"},
{"https://www.host.test/some/path", "A=a"},
{"ftp://www.host.test", ""},
{"ftp://www.host.test/", ""},
{"ftp://www.host.test/some/path", ""},
{"http://www.other.org", ""},
{"http://sibling.host.test", ""},
{"http://deep.www.host.test", ""},
},
},
{
"Secure cookies are not returned to http.",
"http://www.host.test/",
[]string{"A=a; secure"},
"A=a",
[]query{
{"http://www.host.test", ""},
{"http://www.host.test/", ""},
{"http://www.host.test/some/path", ""},
{"https://www.host.test", "A=a"},
{"https://www.host.test/", "A=a"},
{"https://www.host.test/some/path", "A=a"},
},
},
{
"Explicit path.",
"http://www.host.test/",
[]string{"A=a; path=/some/path"},
"A=a",
[]query{
{"http://www.host.test", ""},
{"http://www.host.test/", ""},
{"http://www.host.test/some", ""},
{"http://www.host.test/some/", ""},
{"http://www.host.test/some/path", "A=a"},
{"http://www.host.test/some/paths", ""},
{"http://www.host.test/some/path/foo", "A=a"},
{"http://www.host.test/some/path/foo/", "A=a"},
},
},
{
"Implicit path #1: path is a directory.",
"http://www.host.test/some/path/",
[]string{"A=a"},
"A=a",
[]query{
{"http://www.host.test", ""},
{"http://www.host.test/", ""},
{"http://www.host.test/some", ""},
{"http://www.host.test/some/", ""},
{"http://www.host.test/some/path", "A=a"},
{"http://www.host.test/some/paths", ""},
{"http://www.host.test/some/path/foo", "A=a"},
{"http://www.host.test/some/path/foo/", "A=a"},
},
},
{
"Implicit path #2: path is not a directory.",
"http://www.host.test/some/path/index.html",
[]string{"A=a"},
"A=a",
[]query{
{"http://www.host.test", ""},
{"http://www.host.test/", ""},
{"http://www.host.test/some", ""},
{"http://www.host.test/some/", ""},
{"http://www.host.test/some/path", "A=a"},
{"http://www.host.test/some/paths", ""},
{"http://www.host.test/some/path/foo", "A=a"},
{"http://www.host.test/some/path/foo/", "A=a"},
},
},
{
"Implicit path #3: no path in URL at all.",
"http://www.host.test",
[]string{"A=a"},
"A=a",
[]query{
{"http://www.host.test", "A=a"},
{"http://www.host.test/", "A=a"},
{"http://www.host.test/some/path", "A=a"},
},
},
{
"Cookies are sorted by path length.",
"http://www.host.test/",
[]string{
"A=a; path=/foo/bar",
"B=b; path=/foo/bar/baz/qux",
"C=c; path=/foo/bar/baz",
"D=d; path=/foo"},
"A=a B=b C=c D=d",
[]query{
{"http://www.host.test/foo/bar/baz/qux", "B=b C=c A=a D=d"},
{"http://www.host.test/foo/bar/baz/", "C=c A=a D=d"},
{"http://www.host.test/foo/bar", "A=a D=d"},
},
},
{
"Creation time determines sorting on same length paths.",
"http://www.host.test/",
[]string{
"A=a; path=/foo/bar",
"X=x; path=/foo/bar",
"Y=y; path=/foo/bar/baz/qux",
"B=b; path=/foo/bar/baz/qux",
"C=c; path=/foo/bar/baz",
"W=w; path=/foo/bar/baz",
"Z=z; path=/foo",
"D=d; path=/foo"},
"A=a B=b C=c D=d W=w X=x Y=y Z=z",
[]query{
{"http://www.host.test/foo/bar/baz/qux", "Y=y B=b C=c W=w A=a X=x Z=z D=d"},
{"http://www.host.test/foo/bar/baz/", "C=c W=w A=a X=x Z=z D=d"},
{"http://www.host.test/foo/bar", "A=a X=x Z=z D=d"},
},
},
{
"Sorting of same-name cookies.",
"http://www.host.test/",
[]string{
"A=1; path=/",
"A=2; path=/path",
"A=3; path=/quux",
"A=4; path=/path/foo",
"A=5; domain=.host.test; path=/path",
"A=6; domain=.host.test; path=/quux",
"A=7; domain=.host.test; path=/path/foo",
},
"A=1 A=2 A=3 A=4 A=5 A=6 A=7",
[]query{
{"http://www.host.test/path", "A=2 A=5 A=1"},
{"http://www.host.test/path/foo", "A=4 A=7 A=2 A=5 A=1"},
},
},
{
"Disallow domain cookie on public suffix.",
"http://www.bbc.co.uk",
[]string{
"a=1",
"b=2; domain=co.uk",
},
"a=1",
[]query{{"http://www.bbc.co.uk", "a=1"}},
},
{
"Host cookie on IP.",
"http://192.168.0.10",
[]string{"a=1"},
"a=1",
[]query{{"http://192.168.0.10", "a=1"}},
},
{
"Port is ignored #1.",
"http://www.host.test/",
[]string{"a=1"},
"a=1",
[]query{
{"http://www.host.test", "a=1"},
{"http://www.host.test:8080/", "a=1"},
},
},
{
"Port is ignored #2.",
"http://www.host.test:8080/",
[]string{"a=1"},
"a=1",
[]query{
{"http://www.host.test", "a=1"},
{"http://www.host.test:8080/", "a=1"},
{"http://www.host.test:1234/", "a=1"},
},
},
}
func TestBasics(t *testing.T) {
for _, test := range basicsTests {
jar := newTestJar()
test.run(t, jar)
}
}
// updateAndDeleteTests contains jarTests which must be performed on the same
// Jar.
var updateAndDeleteTests = [...]jarTest{
{
"Set initial cookies.",
"http://www.host.test",
[]string{
"a=1",
"b=2; secure",
"c=3; httponly",
"d=4; secure; httponly"},
"a=1 b=2 c=3 d=4",
[]query{
{"http://www.host.test", "a=1 c=3"},
{"https://www.host.test", "a=1 b=2 c=3 d=4"},
},
},
{
"Update value via http.",
"http://www.host.test",
[]string{
"a=w",
"b=x; secure",
"c=y; httponly",
"d=z; secure; httponly"},
"a=w b=x c=y d=z",
[]query{
{"http://www.host.test", "a=w c=y"},
{"https://www.host.test", "a=w b=x c=y d=z"},
},
},
{
"Clear Secure flag from a http.",
"http://www.host.test/",
[]string{
"b=xx",
"d=zz; httponly"},
"a=w b=xx c=y d=zz",
[]query{{"http://www.host.test", "a=w b=xx c=y d=zz"}},
},
{
"Delete all.",
"http://www.host.test/",
[]string{
"a=1; max-Age=-1", // delete via MaxAge
"b=2; " + expiresIn(-10), // delete via Expires
"c=2; max-age=-1; " + expiresIn(-10), // delete via both
"d=4; max-age=-1; " + expiresIn(10)}, // MaxAge takes precedence
"",
[]query{{"http://www.host.test", ""}},
},
{
"Refill #1.",
"http://www.host.test",
[]string{
"A=1",
"A=2; path=/foo",
"A=3; domain=.host.test",
"A=4; path=/foo; domain=.host.test"},
"A=1 A=2 A=3 A=4",
[]query{{"http://www.host.test/foo", "A=2 A=4 A=1 A=3"}},
},
{
"Refill #2.",
"http://www.google.com",
[]string{
"A=6",
"A=7; path=/foo",
"A=8; domain=.google.com",
"A=9; path=/foo; domain=.google.com"},
"A=1 A=2 A=3 A=4 A=6 A=7 A=8 A=9",
[]query{
{"http://www.host.test/foo", "A=2 A=4 A=1 A=3"},
{"http://www.google.com/foo", "A=7 A=9 A=6 A=8"},
},
},
{
"Delete A7.",
"http://www.google.com",
[]string{"A=; path=/foo; max-age=-1"},
"A=1 A=2 A=3 A=4 A=6 A=8 A=9",
[]query{
{"http://www.host.test/foo", "A=2 A=4 A=1 A=3"},
{"http://www.google.com/foo", "A=9 A=6 A=8"},
},
},
{
"Delete A4.",
"http://www.host.test",
[]string{"A=; path=/foo; domain=host.test; max-age=-1"},
"A=1 A=2 A=3 A=6 A=8 A=9",
[]query{
{"http://www.host.test/foo", "A=2 A=1 A=3"},
{"http://www.google.com/foo", "A=9 A=6 A=8"},
},
},
{
"Delete A6.",
"http://www.google.com",
[]string{"A=; max-age=-1"},
"A=1 A=2 A=3 A=8 A=9",
[]query{
{"http://www.host.test/foo", "A=2 A=1 A=3"},
{"http://www.google.com/foo", "A=9 A=8"},
},
},
{
"Delete A3.",
"http://www.host.test",
[]string{"A=; domain=host.test; max-age=-1"},
"A=1 A=2 A=8 A=9",
[]query{
{"http://www.host.test/foo", "A=2 A=1"},
{"http://www.google.com/foo", "A=9 A=8"},
},
},
{
"No cross-domain delete.",
"http://www.host.test",
[]string{
"A=; domain=google.com; max-age=-1",
"A=; path=/foo; domain=google.com; max-age=-1"},
"A=1 A=2 A=8 A=9",
[]query{
{"http://www.host.test/foo", "A=2 A=1"},
{"http://www.google.com/foo", "A=9 A=8"},
},
},
{
"Delete A8 and A9.",
"http://www.google.com",
[]string{
"A=; domain=google.com; max-age=-1",
"A=; path=/foo; domain=google.com; max-age=-1"},
"A=1 A=2",
[]query{
{"http://www.host.test/foo", "A=2 A=1"},
{"http://www.google.com/foo", ""},
},
},
}
func TestUpdateAndDelete(t *testing.T) {
jar := newTestJar()
for _, test := range updateAndDeleteTests {
test.run(t, jar)
}
}
func TestExpiration(t *testing.T) {
jar := newTestJar()
jarTest{
"Expiration.",
"http://www.host.test",
[]string{
"a=1",
"b=2; max-age=3",
"c=3; " + expiresIn(3),
"d=4; max-age=5",
"e=5; " + expiresIn(5),
"f=6; max-age=100",
},
"a=1 b=2 c=3 d=4 e=5 f=6", // executed at t0 + 1001 ms
[]query{
{"http://www.host.test", "a=1 b=2 c=3 d=4 e=5 f=6"}, // t0 + 2002 ms
{"http://www.host.test", "a=1 d=4 e=5 f=6"}, // t0 + 3003 ms
{"http://www.host.test", "a=1 d=4 e=5 f=6"}, // t0 + 4004 ms
{"http://www.host.test", "a=1 f=6"}, // t0 + 5005 ms
{"http://www.host.test", "a=1 f=6"}, // t0 + 6006 ms
},
}.run(t, jar)
}
//
// Tests derived from Chromium's cookie_store_unittest.h.
//
// See http://src.chromium.org/viewvc/chrome/trunk/src/net/cookies/cookie_store_unittest.h?revision=159685&content-type=text/plain
// Some of the original tests are in a bad condition (e.g.
// DomainWithTrailingDotTest) or are not RFC 6265 conforming (e.g.
// TestNonDottedAndTLD #1 and #6) and have not been ported.
// chromiumBasicsTests contains fundamental tests. Each jarTest has to be
// performed on a fresh, empty Jar.
var chromiumBasicsTests = [...]jarTest{
{
"DomainWithTrailingDotTest.",
"http://www.google.com/",
[]string{
"a=1; domain=.www.google.com.",
"b=2; domain=.www.google.com.."},
"",
[]query{
{"http://www.google.com", ""},
},
},
{
"ValidSubdomainTest #1.",
"http://a.b.c.d.com",
[]string{
"a=1; domain=.a.b.c.d.com",
"b=2; domain=.b.c.d.com",
"c=3; domain=.c.d.com",
"d=4; domain=.d.com"},
"a=1 b=2 c=3 d=4",
[]query{
{"http://a.b.c.d.com", "a=1 b=2 c=3 d=4"},
{"http://b.c.d.com", "b=2 c=3 d=4"},
{"http://c.d.com", "c=3 d=4"},
{"http://d.com", "d=4"},
},
},
{
"ValidSubdomainTest #2.",
"http://a.b.c.d.com",
[]string{
"a=1; domain=.a.b.c.d.com",
"b=2; domain=.b.c.d.com",
"c=3; domain=.c.d.com",
"d=4; domain=.d.com",
"X=bcd; domain=.b.c.d.com",
"X=cd; domain=.c.d.com"},
"X=bcd X=cd a=1 b=2 c=3 d=4",
[]query{
{"http://b.c.d.com", "b=2 c=3 d=4 X=bcd X=cd"},
{"http://c.d.com", "c=3 d=4 X=cd"},
},
},
{
"InvalidDomainTest #1.",
"http://foo.bar.com",
[]string{
"a=1; domain=.yo.foo.bar.com",
"b=2; domain=.foo.com",
"c=3; domain=.bar.foo.com",
"d=4; domain=.foo.bar.com.net",
"e=5; domain=ar.com",
"f=6; domain=.",
"g=7; domain=/",
"h=8; domain=http://foo.bar.com",
"i=9; domain=..foo.bar.com",
"j=10; domain=..bar.com",
"k=11; domain=.foo.bar.com?blah",
"l=12; domain=.foo.bar.com/blah",
"m=12; domain=.foo.bar.com:80",
"n=14; domain=.foo.bar.com:",
"o=15; domain=.foo.bar.com#sup",
},
"", // Jar is empty.
[]query{{"http://foo.bar.com", ""}},
},
{
"InvalidDomainTest #2.",
"http://foo.com.com",
[]string{"a=1; domain=.foo.com.com.com"},
"",
[]query{{"http://foo.bar.com", ""}},
},
{
"DomainWithoutLeadingDotTest #1.",
"http://manage.hosted.filefront.com",
[]string{"a=1; domain=filefront.com"},
"a=1",
[]query{{"http://www.filefront.com", "a=1"}},
},
{
"DomainWithoutLeadingDotTest #2.",
"http://www.google.com",
[]string{"a=1; domain=www.google.com"},
"a=1",
[]query{
{"http://www.google.com", "a=1"},
{"http://sub.www.google.com", "a=1"},
{"http://something-else.com", ""},
},
},
{
"CaseInsensitiveDomainTest.",
"http://www.google.com",
[]string{
"a=1; domain=.GOOGLE.COM",
"b=2; domain=.www.gOOgLE.coM"},
"a=1 b=2",
[]query{{"http://www.google.com", "a=1 b=2"}},
},
{
"TestIpAddress #1.",
"http://1.2.3.4/foo",
[]string{"a=1; path=/"},
"a=1",
[]query{{"http://1.2.3.4/foo", "a=1"}},
},
{
"TestIpAddress #2.",
"http://1.2.3.4/foo",
[]string{
"a=1; domain=.1.2.3.4",
"b=2; domain=.3.4"},
"",
[]query{{"http://1.2.3.4/foo", ""}},
},
{
"TestIpAddress #3.",
"http://1.2.3.4/foo",
[]string{"a=1; domain=1.2.3.4"},
"",
[]query{{"http://1.2.3.4/foo", ""}},
},
{
"TestNonDottedAndTLD #2.",
"http://com./index.html",
[]string{"a=1"},
"a=1",
[]query{
{"http://com./index.html", "a=1"},
{"http://no-cookies.com./index.html", ""},
},
},
{
"TestNonDottedAndTLD #3.",
"http://a.b",
[]string{
"a=1; domain=.b",
"b=2; domain=b"},
"",
[]query{{"http://bar.foo", ""}},
},
{
"TestNonDottedAndTLD #4.",
"http://google.com",
[]string{
"a=1; domain=.com",
"b=2; domain=com"},
"",
[]query{{"http://google.com", ""}},
},
{
"TestNonDottedAndTLD #5.",
"http://google.co.uk",
[]string{
"a=1; domain=.co.uk",
"b=2; domain=.uk"},
"",
[]query{
{"http://google.co.uk", ""},
{"http://else.co.com", ""},
{"http://else.uk", ""},
},
},
{
"TestHostEndsWithDot.",
"http://www.google.com",
[]string{
"a=1",
"b=2; domain=.www.google.com."},
"a=1",
[]query{{"http://www.google.com", "a=1"}},
},
{
"PathTest",
"http://www.google.izzle",
[]string{"a=1; path=/wee"},
"a=1",
[]query{
{"http://www.google.izzle/wee", "a=1"},
{"http://www.google.izzle/wee/", "a=1"},
{"http://www.google.izzle/wee/war", "a=1"},
{"http://www.google.izzle/wee/war/more/more", "a=1"},
{"http://www.google.izzle/weehee", ""},
{"http://www.google.izzle/", ""},
},
},
}
func TestChromiumBasics(t *testing.T) {
for _, test := range chromiumBasicsTests {
jar := newTestJar()
test.run(t, jar)
}
}
// chromiumDomainTests contains jarTests which must be executed all on the
// same Jar.
var chromiumDomainTests = [...]jarTest{
{
"Fill #1.",
"http://www.google.izzle",
[]string{"A=B"},
"A=B",
[]query{{"http://www.google.izzle", "A=B"}},
},
{
"Fill #2.",
"http://www.google.izzle",
[]string{"C=D; domain=.google.izzle"},
"A=B C=D",
[]query{{"http://www.google.izzle", "A=B C=D"}},
},
{
"Verify A is a host cookie and not accessible from subdomain.",
"http://unused.nil",
[]string{},
"A=B C=D",
[]query{{"http://foo.www.google.izzle", "C=D"}},
},
{
"Verify domain cookies are found on proper domain.",
"http://www.google.izzle",
[]string{"E=F; domain=.www.google.izzle"},
"A=B C=D E=F",
[]query{{"http://www.google.izzle", "A=B C=D E=F"}},
},
{
"Leading dots in domain attributes are optional.",
"http://www.google.izzle",
[]string{"G=H; domain=www.google.izzle"},
"A=B C=D E=F G=H",
[]query{{"http://www.google.izzle", "A=B C=D E=F G=H"}},
},
{
"Verify domain enforcement works #1.",
"http://www.google.izzle",
[]string{"K=L; domain=.bar.www.google.izzle"},
"A=B C=D E=F G=H",
[]query{{"http://bar.www.google.izzle", "C=D E=F G=H"}},
},
{
"Verify domain enforcement works #2.",
"http://unused.nil",
[]string{},
"A=B C=D E=F G=H",
[]query{{"http://www.google.izzle", "A=B C=D E=F G=H"}},
},
}
func TestChromiumDomain(t *testing.T) {
jar := newTestJar()
for _, test := range chromiumDomainTests {
test.run(t, jar)
}
}
// chromiumDeletionTests must be performed all on the same Jar.
var chromiumDeletionTests = [...]jarTest{
{
"Create session cookie a1.",
"http://www.google.com",
[]string{"a=1"},
"a=1",
[]query{{"http://www.google.com", "a=1"}},
},
{
"Delete sc a1 via MaxAge.",
"http://www.google.com",
[]string{"a=1; max-age=-1"},
"",
[]query{{"http://www.google.com", ""}},
},
{
"Create session cookie b2.",
"http://www.google.com",
[]string{"b=2"},
"b=2",
[]query{{"http://www.google.com", "b=2"}},
},
{
"Delete sc b2 via Expires.",
"http://www.google.com",
[]string{"b=2; " + expiresIn(-10)},
"",
[]query{{"http://www.google.com", ""}},
},
{
"Create persistent cookie c3.",
"http://www.google.com",
[]string{"c=3; max-age=3600"},
"c=3",
[]query{{"http://www.google.com", "c=3"}},
},
{
"Delete pc c3 via MaxAge.",
"http://www.google.com",
[]string{"c=3; max-age=-1"},
"",
[]query{{"http://www.google.com", ""}},
},
{
"Create persistent cookie d4.",
"http://www.google.com",
[]string{"d=4; max-age=3600"},
"d=4",
[]query{{"http://www.google.com", "d=4"}},
},
{
"Delete pc d4 via Expires.",
"http://www.google.com",
[]string{"d=4; " + expiresIn(-10)},
"",
[]query{{"http://www.google.com", ""}},
},
}
func TestChromiumDeletion(t *testing.T) {
jar := newTestJar()
for _, test := range chromiumDeletionTests {
test.run(t, jar)
}
}
// domainHandlingTests tests and documents the rules for domain handling.
// Each test must be performed on an empty new Jar.
var domainHandlingTests = [...]jarTest{
{
"Host cookie",
"http://www.host.test",
[]string{"a=1"},
"a=1",
[]query{
{"http://www.host.test", "a=1"},
{"http://host.test", ""},
{"http://bar.host.test", ""},
{"http://foo.www.host.test", ""},
{"http://other.test", ""},
{"http://test", ""},
},
},
{
"Domain cookie #1",
"http://www.host.test",
[]string{"a=1; domain=host.test"},
"a=1",
[]query{
{"http://www.host.test", "a=1"},
{"http://host.test", "a=1"},
{"http://bar.host.test", "a=1"},
{"http://foo.www.host.test", "a=1"},
{"http://other.test", ""},
{"http://test", ""},
},
},
{
"Domain cookie #2",
"http://www.host.test",
[]string{"a=1; domain=.host.test"},
"a=1",
[]query{
{"http://www.host.test", "a=1"},
{"http://host.test", "a=1"},
{"http://bar.host.test", "a=1"},
{"http://foo.www.host.test", "a=1"},
{"http://other.test", ""},
{"http://test", ""},
},
},
{
"Host cookie on IDNA domain #1",
"http://www.bücher.test",
[]string{"a=1"},
"a=1",
[]query{
{"http://www.bücher.test", "a=1"},
{"http://www.xn--bcher-kva.test", "a=1"},
{"http://bücher.test", ""},
{"http://xn--bcher-kva.test", ""},
{"http://bar.bücher.test", ""},
{"http://bar.xn--bcher-kva.test", ""},
{"http://foo.www.bücher.test", ""},
{"http://foo.www.xn--bcher-kva.test", ""},
{"http://other.test", ""},
{"http://test", ""},
},
},
{
"Host cookie on IDNA domain #2",
"http://www.xn--bcher-kva.test",
[]string{"a=1"},
"a=1",
[]query{
{"http://www.bücher.test", "a=1"},
{"http://www.xn--bcher-kva.test", "a=1"},
{"http://bücher.test", ""},
{"http://xn--bcher-kva.test", ""},
{"http://bar.bücher.test", ""},
{"http://bar.xn--bcher-kva.test", ""},
{"http://foo.www.bücher.test", ""},
{"http://foo.www.xn--bcher-kva.test", ""},
{"http://other.test", ""},
{"http://test", ""},
},
},
{
"Domain cookie on IDNA domain #1",
"http://www.bücher.test",
[]string{"a=1; domain=xn--bcher-kva.test"},
"a=1",
[]query{
{"http://www.bücher.test", "a=1"},
{"http://www.xn--bcher-kva.test", "a=1"},
{"http://bücher.test", "a=1"},
{"http://xn--bcher-kva.test", "a=1"},
{"http://bar.bücher.test", "a=1"},
{"http://bar.xn--bcher-kva.test", "a=1"},
{"http://foo.www.bücher.test", "a=1"},
{"http://foo.www.xn--bcher-kva.test", "a=1"},
{"http://other.test", ""},
{"http://test", ""},
},
},
{
"Domain cookie on IDNA domain #2",
"http://www.xn--bcher-kva.test",
[]string{"a=1; domain=xn--bcher-kva.test"},
"a=1",
[]query{
{"http://www.bücher.test", "a=1"},
{"http://www.xn--bcher-kva.test", "a=1"},
{"http://bücher.test", "a=1"},
{"http://xn--bcher-kva.test", "a=1"},
{"http://bar.bücher.test", "a=1"},
{"http://bar.xn--bcher-kva.test", "a=1"},
{"http://foo.www.bücher.test", "a=1"},
{"http://foo.www.xn--bcher-kva.test", "a=1"},
{"http://other.test", ""},
{"http://test", ""},
},
},
{
"Host cookie on TLD.",
"http://com",
[]string{"a=1"},
"a=1",
[]query{
{"http://com", "a=1"},
{"http://any.com", ""},
{"http://any.test", ""},
},
},
{
"Domain cookie on TLD becomes a host cookie.",
"http://com",
[]string{"a=1; domain=com"},
"a=1",
[]query{
{"http://com", "a=1"},
{"http://any.com", ""},
{"http://any.test", ""},
},
},
{
"Host cookie on public suffix.",
"http://co.uk",
[]string{"a=1"},
"a=1",
[]query{
{"http://co.uk", "a=1"},
{"http://uk", ""},
{"http://some.co.uk", ""},
{"http://foo.some.co.uk", ""},
{"http://any.uk", ""},
},
},
{
"Domain cookie on public suffix is ignored.",
"http://some.co.uk",
[]string{"a=1; domain=co.uk"},
"",
[]query{
{"http://co.uk", ""},
{"http://uk", ""},
{"http://some.co.uk", ""},
{"http://foo.some.co.uk", ""},
{"http://any.uk", ""},
},
},
}
func TestDomainHandling(t *testing.T) {
for _, test := range domainHandlingTests {
jar := newTestJar()
test.run(t, jar)
}
}
func TestIssue19384(t *testing.T) {
cookies := []*http.Cookie{{Name: "name", Value: "value"}}
for _, host := range []string{"", ".", "..", "..."} {
jar, _ := New(nil)
u := &url.URL{Scheme: "http", Host: host, Path: "/"}
if got := jar.Cookies(u); len(got) != 0 {
t.Errorf("host %q, got %v", host, got)
}
jar.SetCookies(u, cookies)
if got := jar.Cookies(u); len(got) != 1 || got[0].Value != "value" {
t.Errorf("host %q, got %v", host, got)
}
}
}
|
if got := jarKey(host, nil); got != want {
t.Errorf("%q: got %q, want %q", host, got, want)
|
update-user.dto.ts
|
import {
IsString,
IsNotEmpty,
IsUrl,
IsOptional,
Length
} from 'class-validator';
export class UpdateUserDto {
/**
* This field is optional; you can set it now or later through an update.
* If you decide to provide it now, do not pass it empty: this field requires
* at least 2 characters to pass validation.
*
|
@IsNotEmpty()
@IsOptional()
@Length(2, 50)
name?: string;
/**
* Provide the URL of the user's avatar here. This field is optional, but
* when you do supply it, pass a valid URL.
*
* @example 'https://www.google.com/google.jpg'
*/
@IsString()
@IsNotEmpty()
@IsOptional()
@IsUrl()
avatar?: string;
}
|
* @example 'João da Neve'
*/
@IsString()
|
postprocessor.py
|
import numpy as np
import tensorflow as tf
from lib.core.config import cfg
from lib.utils.anchors_util import project_to_bev
from lib.utils.box_3d_utils import box_3d_to_anchor
import lib.dataset.maps_dict as maps_dict
class
|
:
def __init__(self, stage, cls_num):
if stage == 0:
self.postprocessor_cfg = cfg.MODEL.FIRST_STAGE
elif stage == 1:
self.postprocessor_cfg = cfg.MODEL.SECOND_STAGE
else: raise Exception('Not Implemented Error')
self.max_output_size = self.postprocessor_cfg.MAX_OUTPUT_NUM
self.nms_threshold = self.postprocessor_cfg.NMS_THRESH
self.cls_num = cls_num
def class_unaware_format(self, pred_anchors_3d, pred_score):
""" (for rpn propose)
Change prediction format from class-aware-format to class-ignorance-format
pred_anchors_3d: [bs, points_num, 1/cls_num, 7]
pred_score: [bs, points_num, cls_num]
return: pred_anchors_3d: [bs, points_num, 1, 7]
pred_score: [bs, points_num, 1]
"""
unaware_pred_score = tf.reduce_max(pred_score, axis=-1, keepdims=True)
cls_num = pred_anchors_3d.get_shape().as_list()[2]
if cls_num == 1:
return pred_anchors_3d, unaware_pred_score
# class-aware in boundingbox prediction
pred_cls = tf.argmax(pred_score, axis=-1)
pred_cls_onehot = tf.cast(tf.one_hot(pred_cls, depth=cls_num, on_value=1, off_value=0, axis=-1), tf.float32)
# bs, pts_num, cls_num, 7
unaware_pred_anchors_3d = pred_anchors_3d * tf.expand_dims(pred_cls_onehot, axis=-1)
unaware_pred_anchors_3d = tf.reduce_sum(unaware_pred_anchors_3d, axis=2, keepdims=True)
return unaware_pred_anchors_3d, unaware_pred_score
def forward(self, pred_anchors_3d, pred_score, output_dict, pred_attribute=None, pred_velocity=None):
"""
pred_anchors_3d: [bs, points_num, 1/cls_num, 7]
pred_score: [bs, points_num, cls_num]
pred_attribute: [bs, points_num, 1/cls_num, 8]
pred_velocity: [bs, points_num, 1/cls_num, 2]
"""
cls_num = pred_score.get_shape().as_list()[-1]
if cls_num != self.cls_num: # format predictions to class-unaware predictions
assert pred_attribute is None and pred_velocity is None, 'Attribute and velocity predictions are not supported in the RPN phase'
pred_anchors_3d, pred_score = self.class_unaware_format(pred_anchors_3d, pred_score)
pred_anchors_3d_list = tf.unstack(pred_anchors_3d, axis=0)
pred_scores_list = tf.unstack(pred_score, axis=0)
pred_3d_bbox_list = []
pred_3d_cls_score_list = []
pred_3d_cls_cat_list = []
pred_attribute_list = []
pred_velocity_list = []
for batch_idx, pred_anchors_3d, pred_scores in zip(range(len(pred_anchors_3d_list)), pred_anchors_3d_list, pred_scores_list):
cur_pred_3d_bbox_list = []
cur_pred_3d_cls_score_list = []
cur_pred_3d_cls_cat_list = []
cur_pred_attribute_list = []
cur_pred_velocity_list = []
for i in range(self.cls_num):
reg_i = min(i, pred_anchors_3d.get_shape().as_list()[1] - 1)
cur_pred_anchors_3d = pred_anchors_3d[:, reg_i, :]
cur_pred_anchors = box_3d_to_anchor(cur_pred_anchors_3d)
cur_pred_anchors_bev = project_to_bev(cur_pred_anchors) # [-1, 4]
cur_cls_score = pred_scores[:, i]
nms_index = tf.image.non_max_suppression(cur_pred_anchors_bev, cur_cls_score, max_output_size=self.max_output_size, iou_threshold=self.nms_threshold)
cur_pred_3d_bbox_list.append(tf.gather(cur_pred_anchors_3d, nms_index))
cur_pred_3d_cls_score_list.append(tf.gather(cur_cls_score, nms_index))
cur_pred_3d_cls_cat_list.append(tf.cast(tf.ones_like(nms_index), tf.int32) * i)
if pred_attribute is not None:
cur_pred_attribute_list.append(tf.gather(pred_attribute[batch_idx, :, reg_i, :], nms_index))
if pred_velocity is not None:
cur_pred_velocity_list.append(tf.gather(pred_velocity[batch_idx, :, reg_i, :], nms_index))
cur_pred_3d_bbox_list = tf.concat(cur_pred_3d_bbox_list, axis=0)
cur_pred_3d_cls_score_list = tf.concat(cur_pred_3d_cls_score_list, axis=0)
cur_pred_3d_cls_cat_list = tf.concat(cur_pred_3d_cls_cat_list, axis=0)
pred_3d_bbox_list.append(cur_pred_3d_bbox_list)
pred_3d_cls_score_list.append(cur_pred_3d_cls_score_list)
pred_3d_cls_cat_list.append(cur_pred_3d_cls_cat_list)
if pred_attribute is not None:
cur_pred_attribute_list = tf.concat(cur_pred_attribute_list, axis=0)
pred_attribute_list.append(cur_pred_attribute_list)
if pred_velocity is not None:
cur_pred_velocity_list = tf.concat(cur_pred_velocity_list, axis=0)
pred_velocity_list.append(cur_pred_velocity_list)
pred_3d_bbox_list = tf.stack(pred_3d_bbox_list, axis=0)
pred_3d_cls_score_list = tf.stack(pred_3d_cls_score_list, axis=0)
pred_3d_cls_cat_list = tf.stack(pred_3d_cls_cat_list, axis=0)
output_dict[maps_dict.PRED_3D_BBOX].append(pred_3d_bbox_list)
output_dict[maps_dict.PRED_3D_SCORE].append(pred_3d_cls_score_list)
output_dict[maps_dict.PRED_3D_CLS_CATEGORY].append(pred_3d_cls_cat_list)
if pred_attribute is not None:
output_dict[maps_dict.PRED_3D_ATTRIBUTE].append(tf.stack(pred_attribute_list, axis=0))
if pred_velocity is not None:
output_dict[maps_dict.PRED_3D_VELOCITY].append(tf.stack(pred_velocity_list, axis=0))
return output_dict
|
PostProcessor
|
class_v_s_t_g_u_i_1_1_c_split_view.js
|
var class_v_s_t_g_u_i_1_1_c_split_view =
[
[ "ResizeMethod", "class_v_s_t_g_u_i_1_1_c_split_view.html#a7df5e03e40355a107ef2220b9b05f610", [
[ "kResizeFirstView", "class_v_s_t_g_u_i_1_1_c_split_view.html#a7df5e03e40355a107ef2220b9b05f610a23b9759159049adfbe27a4c1f2cbe013", null ],
[ "kResizeSecondView", "class_v_s_t_g_u_i_1_1_c_split_view.html#a7df5e03e40355a107ef2220b9b05f610a566d4c99a011f581749287a7feea4b7b", null ],
[ "kResizeLastView", "class_v_s_t_g_u_i_1_1_c_split_view.html#a7df5e03e40355a107ef2220b9b05f610a2b9e4767d48c607b24346cb73db98497", null ],
[ "kResizeAllViews", "class_v_s_t_g_u_i_1_1_c_split_view.html#a7df5e03e40355a107ef2220b9b05f610a27f26a9a59366b86b6e4722fbd0db487", null ]
] ],
[ "Style", "class_v_s_t_g_u_i_1_1_c_split_view.html#a064d2685e7efa9ff1356951379364327", [
[ "kHorizontal", "class_v_s_t_g_u_i_1_1_c_split_view.html#a064d2685e7efa9ff1356951379364327a8b9df72c61f890f49cc220e0bde867ce", null ],
[ "kVertical", "class_v_s_t_g_u_i_1_1_c_split_view.html#a064d2685e7efa9ff1356951379364327a5d1a83c47439dfd8497cae026b9205e1", null ]
] ],
[ "CSplitView", "class_v_s_t_g_u_i_1_1_c_split_view.html#a2e776305cb0e006c0079014090715ee5", null ],
[ "~CSplitView", "class_v_s_t_g_u_i_1_1_c_split_view.html#acc09c7c51cb6fced475ef03ae8f14d56", null ],
[ "addView", "class_v_s_t_g_u_i_1_1_c_split_view.html#a230bff0584135796ccef3fd6d7d68f38", null ],
[ "addView", "class_v_s_t_g_u_i_1_1_c_split_view.html#abe34c3d3957bf5a5b2f9f696117f8c44", null ],
[ "addView", "class_v_s_t_g_u_i_1_1_c_split_view.html#a007211558f8f77a1ee22d550aa90f35b", null ],
[ "addViewToSeparator", "class_v_s_t_g_u_i_1_1_c_split_view.html#aeb594123dd99558cef187f527c9239c3", null ],
[ "attached", "class_v_s_t_g_u_i_1_1_c_split_view.html#a9a47ba1b3f9c1bc4acc46f01b77342e1", null ],
[ "getDrawer", "class_v_s_t_g_u_i_1_1_c_split_view.html#af06f03e61b4c9afc138ca34564f5d0e2", null ],
[ "getResizeMethod", "class_v_s_t_g_u_i_1_1_c_split_view.html#a62367ab40d9087d3ac96fd109c2663ba", null ],
[ "getSeparatorWidth", "class_v_s_t_g_u_i_1_1_c_split_view.html#afa36500f8deef7f576efb99bb09f41b8", null ],
[ "getStyle", "class_v_s_t_g_u_i_1_1_c_split_view.html#afb6bd73606c8a6326163720f29e7211d", null ],
[ "removeAll", "class_v_s_t_g_u_i_1_1_c_split_view.html#a1871d4127e0338a7e0938511ccbc4faa", null ],
[ "removed", "class_v_s_t_g_u_i_1_1_c_split_view.html#af9d9fd929ea6a071a14f2a75bebd51c0", null ],
[ "removeView", "class_v_s_t_g_u_i_1_1_c_split_view.html#acf6751c705ff5b2404e5dae3c3becb17", null ],
|
[ "requestNewSeparatorSize", "class_v_s_t_g_u_i_1_1_c_split_view.html#a273d4f88289103513860445777feab07", null ],
[ "resizeFirstView", "class_v_s_t_g_u_i_1_1_c_split_view.html#abe0d1800aa04739465792917c3db55a0", null ],
[ "resizeLastView", "class_v_s_t_g_u_i_1_1_c_split_view.html#aa2618b05922970bb41e3e0c4064a02a9", null ],
[ "resizeSecondView", "class_v_s_t_g_u_i_1_1_c_split_view.html#a39ec6ebcf46f4ae848c122cab7ee98e9", null ],
[ "resizeViewsEqual", "class_v_s_t_g_u_i_1_1_c_split_view.html#af4633baa5a725d0765c351d75b444908", null ],
[ "setResizeMethod", "class_v_s_t_g_u_i_1_1_c_split_view.html#a9e8af546db2772ae4c9f7d6fe8c80c1f", null ],
[ "setSeparatorWidth", "class_v_s_t_g_u_i_1_1_c_split_view.html#a708670aa21b5e3db858f812f34c7c21a", null ],
[ "setStyle", "class_v_s_t_g_u_i_1_1_c_split_view.html#a7758d42ce21876bad727009919cb9e74", null ],
[ "setViewSize", "class_v_s_t_g_u_i_1_1_c_split_view.html#afe51e7c243aa90b6a551772533f1cfe3", null ],
[ "sizeToFit", "class_v_s_t_g_u_i_1_1_c_split_view.html#a01bce002411139ac6e92932bd94a52d3", null ],
[ "storeViewSizes", "class_v_s_t_g_u_i_1_1_c_split_view.html#afad20b6fd042dcabe536c75bb74c7e06", null ],
[ "resizeMethod", "class_v_s_t_g_u_i_1_1_c_split_view.html#aac26724b1544bda785523043b354d23d", null ],
[ "separatorDrawer", "class_v_s_t_g_u_i_1_1_c_split_view.html#ab675013abdc2e1c2ce2561384a9b9571", null ],
[ "separatorWidth", "class_v_s_t_g_u_i_1_1_c_split_view.html#a117fc71d74d2a08314fba7b45e559355", null ],
[ "style", "class_v_s_t_g_u_i_1_1_c_split_view.html#a023476464d7ae2b4a2c68692847ea9d5", null ]
];
| |
main.go
|
package main
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"time"
)
func main()
|
func fetch(url string, ch chan<- string) {
start := time.Now()
resp, err := http.Get(url)
if err != nil {
ch <- fmt.Sprint(err) // send to channel ch
return
}
nbytes, err := io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close() // don't leak resources
if err != nil {
ch <- fmt.Sprintf("while reading %s: %v", url, err)
return
}
secs := time.Since(start).Seconds()
ch <- fmt.Sprintf("%.2fs %7d %s", secs, nbytes, url)
}
|
{
start := time.Now()
ch := make(chan string)
for _, url := range os.Args[1:] {
go fetch(url, ch) // start a goroutine
}
for range os.Args[1:] {
fmt.Println(<-ch) // receive from channel ch
}
fmt.Printf("%.2fs elapsed\n", time.Since(start).Seconds())
}
|
model_helper.rs
|
use super::model::FlattenedFieldType;
use byteorder::ByteOrder;
pub trait LittleEndianParser {
fn parse(serialized: &[u8]) -> Self;
}
impl LittleEndianParser for i8 {
fn parse(serialized: &[u8]) -> Self {
serialized[0] as i8
}
}
impl LittleEndianParser for u8 {
fn parse(serialized: &[u8]) -> Self {
serialized[0]
}
}
impl LittleEndianParser for i16 {
fn parse(serialized: &[u8]) -> Self {
byteorder::LittleEndian::read_i16(serialized)
}
}
impl LittleEndianParser for u16 {
fn parse(serialized: &[u8]) -> Self {
byteorder::LittleEndian::read_u16(serialized)
}
}
impl LittleEndianParser for i32 {
fn parse(serialized: &[u8]) -> Self {
byteorder::LittleEndian::read_i32(serialized)
}
}
impl LittleEndianParser for u32 {
fn parse(serialized: &[u8]) -> Self {
byteorder::LittleEndian::read_u32(serialized)
}
}
impl LittleEndianParser for i64 {
fn parse(serialized: &[u8]) -> Self {
byteorder::LittleEndian::read_i64(serialized)
}
}
impl LittleEndianParser for u64 {
fn parse(serialized: &[u8]) -> Self {
|
byteorder::LittleEndian::read_u64(serialized)
}
}
impl LittleEndianParser for f32 {
fn parse(serialized: &[u8]) -> Self {
byteorder::LittleEndian::read_f32(serialized)
}
}
impl LittleEndianParser for f64 {
fn parse(serialized: &[u8]) -> Self {
byteorder::LittleEndian::read_f64(serialized)
}
}
impl LittleEndianParser for char {
fn parse(serialized: &[u8]) -> Self {
serialized[0] as char
}
}
impl LittleEndianParser for bool {
fn parse(serialized: &[u8]) -> Self {
serialized[0] != 0
}
}
pub trait FlattenedFieldTypeMatcher {
fn matches(flat_field_type: &FlattenedFieldType) -> bool;
}
impl FlattenedFieldTypeMatcher for i8 {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::Int8
}
}
impl FlattenedFieldTypeMatcher for u8 {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::UInt8
}
}
impl FlattenedFieldTypeMatcher for i16 {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::Int16
}
}
impl FlattenedFieldTypeMatcher for u16 {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::UInt16
}
}
impl FlattenedFieldTypeMatcher for i32 {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::Int32
}
}
impl FlattenedFieldTypeMatcher for u32 {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::UInt32
}
}
impl FlattenedFieldTypeMatcher for i64 {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::Int64
}
}
impl FlattenedFieldTypeMatcher for u64 {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::UInt64
}
}
impl FlattenedFieldTypeMatcher for f32 {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::Float
}
}
impl FlattenedFieldTypeMatcher for f64 {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::Double
}
}
impl FlattenedFieldTypeMatcher for char {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::Char
}
}
impl FlattenedFieldTypeMatcher for bool {
fn matches(flat_field_type: &FlattenedFieldType) -> bool {
*flat_field_type == FlattenedFieldType::Bool
}
}
| |
AbstractHistogram.encoding.ts
|
/*
* This is a TypeScript port of the original Java version, which was written by
* Gil Tene as described in
* https://github.com/HdrHistogram/HdrHistogram
* and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
import ByteBuffer from "./ByteBuffer";
import { AbstractHistogram, HistogramConstructor } from "./AbstractHistogram";
import ZigZagEncoding from "./ZigZagEncoding";
import PackedHistogram from "./PackedHistogram";
import Int8Histogram from "./Int8Histogram";
import Int16Histogram from "./Int16Histogram";
import Int32Histogram from "./Int32Histogram";
import Float64Histogram from "./Float64Histogram";
// @ts-ignore
import * as pako from "pako";
// @ts-ignore
import * as base64 from "base64-js";
const { max } = Math;
const V2EncodingCookieBase = 0x1c849303;
const V2CompressedEncodingCookieBase = 0x1c849304;
const V2maxWordSizeInBytes = 9; // LEB128-64b9B + ZigZag require up to 9 bytes per word
const encodingCookie = V2EncodingCookieBase | 0x10; // LSBit of wordsize byte indicates TLZE Encoding
const compressedEncodingCookie = V2CompressedEncodingCookieBase | 0x10; // LSBit of wordsize byte indicates TLZE Encoding
function fillBufferFromCountsArray(
self: AbstractHistogram,
buffer: ByteBuffer
) {
if (!self.countsArrayIndex) {
console.log(self);
}
const countsLimit = self.countsArrayIndex(self.maxValue) + 1;
let srcIndex = 0;
while (srcIndex < countsLimit) {
// V2 encoding format uses a ZigZag LEB128-64b9B encoded long. Positive values are counts,
// while negative values indicate a run of repeated zero counts.
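// Illustrative run (hypothetical counts): the index sequence 5, 0, 0, 0, 2 is
// emitted as 5, -3, 2: the run of three zero counts collapses into a single
// negative value rather than three separate zeros.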
const count = self.getCountAtIndex(srcIndex++);
if (count < 0) {
throw new Error(
"Cannot encode histogram containing negative counts (" +
count +
") at index " +
srcIndex +
", corresponding the value range [" +
self.lowestEquivalentValue(self.valueFromIndex(srcIndex)) +
"," +
self.nextNonEquivalentValue(self.valueFromIndex(srcIndex)) +
")"
);
}
// Count trailing 0s (which follow this count):
let zerosCount = 0;
if (count == 0) {
zerosCount = 1;
|
}
}
if (zerosCount > 1) {
ZigZagEncoding.encode(buffer, -zerosCount);
} else {
ZigZagEncoding.encode(buffer, count);
}
}
}
/**
* Encode this histogram into a ByteBuffer
* @param self this histogram
* @param buffer The buffer to encode into
* @return The number of bytes written to the buffer
*/
function encodeIntoByteBuffer(self: AbstractHistogram, buffer: ByteBuffer) {
const initialPosition = buffer.position;
buffer.putInt32(encodingCookie);
buffer.putInt32(0); // Placeholder for payload length in bytes.
buffer.putInt32(1);
buffer.putInt32(self.numberOfSignificantValueDigits);
buffer.putInt64(self.lowestDiscernibleValue);
buffer.putInt64(self.highestTrackableValue);
buffer.putInt64(1);
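// Fixed header written so far (40 bytes): cookie, payload-length placeholder,
// normalizing index offset (written as 1, ignored on decode), number of
// significant value digits, lowest discernible value, highest trackable value,
// and the integer-to-double conversion ratio (written as 1, ignored on decode);
// the ZigZag-encoded counts payload follows.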
const payloadStartPosition = buffer.position;
fillBufferFromCountsArray(self, buffer);
const backupIndex = buffer.position;
buffer.position = initialPosition + 4;
buffer.putInt32(backupIndex - payloadStartPosition); // Record the payload length
buffer.position = backupIndex;
return backupIndex - initialPosition;
}
function fillCountsArrayFromSourceBuffer(
self: AbstractHistogram,
sourceBuffer: ByteBuffer,
lengthInBytes: number,
wordSizeInBytes: number
) {
if (
wordSizeInBytes != 2 &&
wordSizeInBytes != 4 &&
wordSizeInBytes != 8 &&
wordSizeInBytes != V2maxWordSizeInBytes
) {
throw new Error(
"word size must be 2, 4, 8, or V2maxWordSizeInBytes (" +
V2maxWordSizeInBytes +
") bytes"
);
}
let dstIndex = 0;
const endPosition = sourceBuffer.position + lengthInBytes;
while (sourceBuffer.position < endPosition) {
let zerosCount = 0;
let count = ZigZagEncoding.decode(sourceBuffer);
if (count < 0) {
zerosCount = -count;
dstIndex += zerosCount; // No need to set zeros in array. Just skip them.
} else {
self.setCountAtIndex(dstIndex++, count);
}
}
return dstIndex; // this is the destination length
}
function getCookieBase(cookie: number): number {
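// e.g. the V2 encoding cookie 0x1c849313 & ~0xf0 yields 0x1c849303 (V2EncodingCookieBase).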
return cookie & ~0xf0;
}
function getWordSizeInBytesFromCookie(cookie: number): number {
if (
getCookieBase(cookie) == V2EncodingCookieBase ||
getCookieBase(cookie) == V2CompressedEncodingCookieBase
) {
return V2maxWordSizeInBytes;
}
const sizeByte = (cookie & 0xf0) >> 4;
return sizeByte & 0xe;
}
function findDeflateFunction() {
try {
return eval('require("zlib").deflateSync');
} catch (error) {
return pako.deflate;
}
}
function findInflateFunction() {
try {
return eval('require("zlib").inflateSync');
} catch (error) {
return pako.inflate;
}
}
const deflate = findDeflateFunction();
const inflate = findInflateFunction();
export function decompress(data: Uint8Array): Uint8Array {
const buffer = new ByteBuffer(data);
const initialTargetPosition = buffer.position;
const cookie = buffer.getInt32();
if ((cookie & ~0xf0) !== V2CompressedEncodingCookieBase) {
throw new Error("Encoding not supported, only V2 is supported");
}
const lengthOfCompressedContents = buffer.getInt32();
const uncompressedBuffer: Uint8Array = inflate(
buffer.data.slice(
initialTargetPosition + 8,
initialTargetPosition + 8 + lengthOfCompressedContents
)
);
return uncompressedBuffer;
}
function ctrFromBucketSize(
bitBucketSize: 8 | 16 | 32 | 64 | "packed"
): HistogramConstructor {
switch (bitBucketSize) {
case "packed":
return PackedHistogram;
case 8:
return Int8Histogram;
case 16:
return Int16Histogram;
case 32:
return Int32Histogram;
case 64:
return Float64Histogram;
default:
throw new Error("Incorrect parameter bitBucketSize");
}
}
export function doDecode(
data: Uint8Array,
bitBucketSize: 8 | 16 | 32 | 64 | "packed" = 32,
minBarForHighestTrackableValue: number = 0
) {
const buffer = new ByteBuffer(data);
const cookie = buffer.getInt32();
let payloadLengthInBytes: number;
let numberOfSignificantValueDigits: number;
let lowestTrackableUnitValue: number;
let highestTrackableValue: number;
if (getCookieBase(cookie) === V2EncodingCookieBase) {
if (getWordSizeInBytesFromCookie(cookie) != V2maxWordSizeInBytes) {
throw new Error(
"The buffer does not contain a Histogram (no valid cookie found)"
);
}
payloadLengthInBytes = buffer.getInt32();
buffer.getInt32(); // normalizingIndexOffset not used
numberOfSignificantValueDigits = buffer.getInt32();
lowestTrackableUnitValue = buffer.getInt64();
highestTrackableValue = buffer.getInt64();
buffer.getInt64(); // integerToDoubleValueConversionRatio not used
} else {
throw new Error(
"The buffer does not contain a Histogram (no valid V2 encoding cookie found)"
);
}
highestTrackableValue = max(
highestTrackableValue,
minBarForHighestTrackableValue
);
const histogramConstr = ctrFromBucketSize(bitBucketSize);
const histogram = new histogramConstr(
lowestTrackableUnitValue,
highestTrackableValue,
numberOfSignificantValueDigits
);
const filledLength = fillCountsArrayFromSourceBuffer(
histogram,
buffer,
payloadLengthInBytes,
V2maxWordSizeInBytes
);
histogram.establishInternalTackingValues(filledLength);
return histogram;
}
function doEncodeIntoCompressedBase64(compressionLevel?: number): string {
const compressionOptions = compressionLevel
? { level: compressionLevel }
: {};
const self: AbstractHistogram = this as any;
const targetBuffer = ByteBuffer.allocate();
targetBuffer.putInt32(compressedEncodingCookie);
const intermediateUncompressedByteBuffer = ByteBuffer.allocate();
const uncompressedLength = encodeIntoByteBuffer(
self,
intermediateUncompressedByteBuffer
);
const data = intermediateUncompressedByteBuffer.data.slice(
0,
uncompressedLength
);
const compressedData: Uint8Array = deflate(data, compressionOptions);
targetBuffer.putInt32(compressedData.byteLength);
targetBuffer.putArray(compressedData);
return base64.fromByteArray(targetBuffer.data);
}
declare module "./AbstractHistogram" {
namespace AbstractHistogram {
export let decode: typeof doDecode;
}
}
AbstractHistogram.decode = doDecode;
declare module "./AbstractHistogram" {
interface AbstractHistogram {
encodeIntoCompressedBase64: typeof doEncodeIntoCompressedBase64;
}
}
AbstractHistogram.prototype.encodeIntoCompressedBase64 = doEncodeIntoCompressedBase64;
|
while (srcIndex < countsLimit && self.getCountAtIndex(srcIndex) == 0) {
zerosCount++;
srcIndex++;
|
tests.rs
|
use std::num;
use std::str::FromStr;
use proptest::prelude::*;
use proptest::strategy::Strategy;
use test_strategy::proptest;
use crate::{
CompactString,
ToCompactString,
};
#[cfg(target_pointer_width = "64")]
const MAX_SIZE: usize = 24;
#[cfg(target_pointer_width = "32")]
const MAX_SIZE: usize = 12;
/// generates random unicode strings, up to 80 chars long
pub fn rand_unicode() -> impl Strategy<Value = String> {
proptest::collection::vec(proptest::char::any(), 0..80).prop_map(|v| v.into_iter().collect())
}
/// generates a random collection of bytes, up to 80 bytes long
pub fn rand_bytes() -> impl Strategy<Value = Vec<u8>> {
proptest::collection::vec(any::<u8>(), 0..80)
}
/// [`proptest::strategy::Strategy`] that generates [`String`]s with up to `len` bytes
pub fn rand_unicode_with_max_len(len: usize) -> impl Strategy<Value = String> {
proptest::collection::vec(proptest::char::any(), 0..len).prop_map(move |chars| {
let mut len_utf8 = 0;
chars
.into_iter()
.take_while(|c| {
len_utf8 += c.len_utf8();
len_utf8 <= len
})
.collect::<String>()
})
}
/// generates collections of up to 40 random unicode strings, each up to 80 chars long
fn rand_unicode_collection() -> impl Strategy<Value = Vec<String>> {
proptest::collection::vec(rand_unicode(), 0..40)
}
/// Asserts a [`CompactString`] is allocated properly
fn assert_allocated_properly(compact: &CompactString) {
if compact.len() <= MAX_SIZE {
assert!(!compact.is_heap_allocated())
} else {
assert!(compact.is_heap_allocated())
}
}
#[proptest]
#[cfg_attr(miri, ignore)]
fn proptest_strings_roundtrip(#[strategy(rand_unicode())] word: String) {
let compact = CompactString::new(&word);
prop_assert_eq!(&word, &compact);
}
#[proptest]
#[cfg_attr(miri, ignore)]
fn proptest_strings_allocated_properly(#[strategy(rand_unicode())] word: String) {
let compact = CompactString::new(&word);
assert_allocated_properly(&compact);
}
#[proptest]
#[cfg_attr(miri, ignore)]
fn proptest_char_iterator_roundtrips(#[strategy(rand_unicode())] word: String)
|
#[proptest]
#[cfg_attr(miri, ignore)]
fn proptest_string_iterator_roundtrips(
#[strategy(rand_unicode_collection())] collection: Vec<String>,
) {
let compact: CompactString = collection.clone().into_iter().collect();
let word: String = collection.into_iter().collect();
prop_assert_eq!(&word, &compact);
}
#[proptest]
#[cfg_attr(miri, ignore)]
fn proptest_from_bytes_roundtrips(#[strategy(rand_unicode())] word: String) {
let bytes = word.into_bytes();
let compact = CompactString::from_utf8(&bytes).unwrap();
let word = String::from_utf8(bytes).unwrap();
prop_assert_eq!(compact, word);
}
#[proptest]
#[cfg_attr(miri, ignore)]
fn proptest_from_bytes_only_valid_utf8(#[strategy(rand_bytes())] bytes: Vec<u8>) {
let compact_result = CompactString::from_utf8(&bytes);
let word_result = String::from_utf8(bytes);
match (compact_result, word_result) {
(Ok(c), Ok(s)) => prop_assert_eq!(c, s),
(Err(c_err), Err(s_err)) => prop_assert_eq!(c_err, s_err.utf8_error()),
_ => panic!("CompactString and core::str read UTF-8 differently?"),
}
}
#[proptest]
#[cfg_attr(miri, ignore)]
fn proptest_from_lossy_cow_roundtrips(#[strategy(rand_bytes())] bytes: Vec<u8>) {
let cow = String::from_utf8_lossy(&bytes[..]);
let compact = CompactString::from(cow.clone());
prop_assert_eq!(cow, compact);
}
#[proptest]
#[cfg_attr(miri, ignore)]
fn proptest_reserve_and_write_bytes(#[strategy(rand_unicode())] word: String) {
let mut compact = CompactString::default();
prop_assert!(compact.is_empty());
// reserve enough space to write our bytes
compact.reserve(word.len());
// SAFETY: We're writing a String which we know is UTF-8
let slice = unsafe { compact.as_mut_bytes() };
slice[..word.len()].copy_from_slice(word.as_bytes());
// SAFETY: We know this is the length of our string, since `compact` started with 0 bytes
// and we just wrote `word.len()` bytes
unsafe { compact.set_len(word.len()) }
prop_assert_eq!(&word, &compact);
}
#[proptest]
#[cfg_attr(miri, ignore)]
fn proptest_reserve_and_write_bytes_allocated_properly(#[strategy(rand_unicode())] word: String) {
let mut compact = CompactString::default();
prop_assert!(compact.is_empty());
// reserve enough space to write our bytes
compact.reserve(word.len());
// SAFETY: We're writing a String which we know is UTF-8
let slice = unsafe { compact.as_mut_bytes() };
slice[..word.len()].copy_from_slice(word.as_bytes());
// SAFETY: We know this is the length of our string, since `compact` started with 0 bytes
// and we just wrote `word.len()` bytes
unsafe { compact.set_len(word.len()) }
prop_assert_eq!(compact.len(), word.len());
// The string should be heap allocated if `word` was > MAX_SIZE
//
// NOTE: The reserve and write APIs don't currently support the Packed representation
prop_assert_eq!(compact.is_heap_allocated(), word.len() > MAX_SIZE);
}
#[proptest]
#[cfg_attr(miri, ignore)]
fn proptest_arbitrary_compact_string_converts_to_string(#[strategy(rand_unicode())] word: String) {
let compact = CompactString::new(&word);
let result = String::from(compact);
prop_assert_eq!(result.len(), word.len());
prop_assert_eq!(result, word);
}
#[proptest]
#[cfg_attr(miri, ignore)]
fn proptest_extend_chars_allocated_properly(
#[strategy(rand_unicode())] start: String,
#[strategy(rand_unicode())] extend: String,
) {
let mut compact = CompactString::new(&start);
compact.extend(extend.chars());
let mut control = start.clone();
control.extend(extend.chars());
prop_assert_eq!(&compact, &control);
assert_allocated_properly(&compact);
}
#[test]
fn proptest_const_creation() {
const EMPTY: CompactString = CompactString::new_inline("");
const SHORT: CompactString = CompactString::new_inline("rust");
#[cfg(target_pointer_width = "64")]
const PACKED: CompactString = CompactString::new_inline("i am 24 characters long!");
#[cfg(target_pointer_width = "32")]
const PACKED: CompactString = CompactString::new_inline("i am 12 char");
assert_eq!(EMPTY, CompactString::new(""));
assert_eq!(SHORT, CompactString::new("rust"));
#[cfg(target_pointer_width = "64")]
assert_eq!(PACKED, CompactString::new("i am 24 characters long!"));
#[cfg(target_pointer_width = "32")]
assert_eq!(PACKED, CompactString::new("i am 12 char"));
}
#[test]
fn test_short_ascii() {
// always inlined on all archs
let strs = vec!["nyc", "statue", "liberty", "img_1234.png"];
for s in strs {
let compact = CompactString::new(s);
assert_eq!(compact, s);
assert_eq!(s, compact);
assert_eq!(compact.is_heap_allocated(), false);
}
}
#[test]
fn test_short_unicode() {
let strs = vec![
("🦀", false),
("🌧☀️", false),
// str is 12 bytes long, and leading character is non-ASCII
("咬𓅈ꁈ:_", false),
];
for (s, is_heap) in strs {
let compact = CompactString::new(s);
assert_eq!(compact, s);
assert_eq!(s, compact);
assert_eq!(compact.is_heap_allocated(), is_heap);
}
}
#[test]
fn test_medium_ascii() {
let strs = vec![
"rustconf 2021",
"new york city",
"nyc pizza is good",
"test the 24 char limit!!",
];
for s in strs {
let compact = CompactString::new(s);
assert_eq!(compact, s);
assert_eq!(s, compact);
#[cfg(target_pointer_width = "64")]
let is_heap = false;
#[cfg(target_pointer_width = "32")]
let is_heap = true;
assert_eq!(compact.is_heap_allocated(), is_heap);
}
}
#[test]
fn test_medium_unicode() {
let strs = vec![
("☕️👀😁🎉", false),
// str is 24 bytes long, and leading character is non-ASCII
("🦀😀😃😄😁🦀", false),
];
#[allow(unused_variables)]
for (s, is_heap) in strs {
let compact = CompactString::new(s);
assert_eq!(compact, s);
assert_eq!(s, compact);
#[cfg(target_pointer_width = "64")]
let is_heap = is_heap;
#[cfg(target_pointer_width = "32")]
let is_heap = true;
assert_eq!(compact.is_heap_allocated(), is_heap);
}
}
#[test]
fn test_from_str_trait() {
let s = "hello_world";
// Until the never type `!` is stabilized, we have to unwrap here
let c = CompactString::from_str(s).unwrap();
assert_eq!(s, c);
}
#[test]
#[cfg_attr(target_pointer_width = "32", ignore)]
fn test_from_char_iter() {
let s = "\u{0} 0 \u{0}a𐀀𐀀 𐀀a𐀀";
println!("{}", s.len());
let compact: CompactString = s.chars().into_iter().collect();
assert!(!compact.is_heap_allocated());
assert_eq!(s, compact);
}
#[test]
#[cfg_attr(target_pointer_width = "32", ignore)]
fn test_extend_packed_from_empty() {
let s = " 0\u{80}A\u{0}𐀀 𐀀¡a𐀀0";
let mut compact = CompactString::new(s);
assert!(!compact.is_heap_allocated());
// extend from an empty iterator
compact.extend("".chars());
// we should still be inline, not heap allocated
assert!(!compact.is_heap_allocated());
}
#[test]
fn test_pop_empty() {
let num_pops = 256;
let mut compact = CompactString::from("");
(0..num_pops).for_each(|_| {
let ch = compact.pop();
assert!(ch.is_none());
});
assert!(compact.is_empty());
assert_eq!(compact, "");
}
#[test]
fn test_extend_from_empty_strs() {
let strs = vec![
"", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
"", "",
];
let compact: CompactString = strs.clone().into_iter().collect();
assert_eq!(compact, "");
assert!(compact.is_empty());
assert!(!compact.is_heap_allocated());
}
#[test]
fn test_compact_str_is_send_and_sync() {
fn is_send_and_sync<T: Send + Sync>() {}
is_send_and_sync::<CompactString>();
}
#[test]
fn test_fmt_write() {
use core::fmt::Write;
let mut compact = CompactString::default();
write!(compact, "test").unwrap();
assert_eq!(compact, "test");
writeln!(compact, "{}", 1234).unwrap();
assert_eq!(compact, "test1234\n");
write!(compact, "{:>8} {} {:<8}", "some", "more", "words").unwrap();
assert_eq!(compact, "test1234\n some more words ");
}
#[test]
fn test_plus_operator() {
assert_eq!(CompactString::from("a") + CompactString::from("b"), "ab");
assert_eq!(CompactString::from("a") + &CompactString::from("b"), "ab");
assert_eq!(CompactString::from("a") + "b", "ab");
assert_eq!(CompactString::from("a") + &String::from("b"), "ab");
assert_eq!(CompactString::from("a") + String::from("b"), "ab");
assert_eq!(String::from("a") + CompactString::from("b"), "ab");
}
#[test]
fn test_u8_to_compact_string() {
let vals = [u8::MIN, 1, 42, u8::MAX - 2, u8::MAX - 1, u8::MAX];
for x in &vals {
let c = x.to_compact_string();
let s = x.to_string();
assert_eq!(c, s);
assert!(!c.is_heap_allocated());
}
}
#[test]
fn test_i8_to_compact_string() {
let vals = [
i8::MIN,
i8::MIN + 1,
i8::MIN + 2,
-1,
0,
1,
42,
i8::MAX - 2,
i8::MAX - 1,
i8::MAX,
];
for x in &vals {
let c = x.to_compact_string();
let s = x.to_string();
assert_eq!(c, s);
assert!(!c.is_heap_allocated());
}
}
#[test]
fn test_u16_to_compact_string() {
let vals = [u16::MIN, 1, 42, 999, u16::MAX - 2, u16::MAX - 1, u16::MAX];
for x in &vals {
let c = x.to_compact_string();
let s = x.to_string();
assert_eq!(c, s);
assert!(!c.is_heap_allocated());
}
}
#[test]
fn test_i16_to_compact_string() {
let vals = [
i16::MIN,
i16::MIN + 1,
i16::MIN + 2,
-42,
-1,
0,
1,
42,
999,
i16::MAX - 2,
i16::MAX - 1,
i16::MAX,
];
for x in &vals {
let c = x.to_compact_string();
let s = x.to_string();
assert_eq!(c, s);
assert!(!c.is_heap_allocated());
}
}
#[test]
fn test_u32_to_compact_string() {
let vals = [
u32::MIN,
1,
42,
999,
123456789,
u32::MAX - 2,
u32::MAX - 1,
u32::MAX,
];
for x in &vals {
let c = x.to_compact_string();
let s = x.to_string();
assert_eq!(c, s);
assert!(!c.is_heap_allocated());
}
}
#[test]
fn test_i32_to_compact_string() {
let vals = [
i32::MIN,
i32::MIN + 2,
i32::MIN + 1,
-12345678,
-42,
-1,
0,
1,
999,
123456789,
i32::MAX - 2,
i32::MAX - 1,
i32::MAX,
];
for x in &vals {
let c = x.to_compact_string();
let s = x.to_string();
assert_eq!(c, s);
assert!(!c.is_heap_allocated());
}
}
#[test]
fn test_u64_to_compact_string() {
let vals = [
u64::MIN,
1,
999,
123456789,
98765432123456,
u64::MAX - 2,
u64::MAX - 1,
u64::MAX,
];
for x in &vals {
let c = x.to_compact_string();
let s = x.to_string();
assert_eq!(c, s);
// u64 can be up to 20 characters long, which can't be inlined on 32-bit arches
#[cfg(target_pointer_width = "64")]
assert!(!c.is_heap_allocated());
}
}
#[test]
fn test_i64_to_compact_string() {
let vals = [
i64::MIN,
i64::MIN + 1,
i64::MIN + 2,
-22222222,
-42,
0,
1,
999,
123456789,
i64::MAX - 2,
i64::MAX - 1,
i64::MAX,
];
for x in &vals {
let c = x.to_compact_string();
let s = x.to_string();
assert_eq!(c, s);
// i64 can be up to 20 characters long, which can't be inlined on 32-bit arches
#[cfg(target_pointer_width = "64")]
assert!(!c.is_heap_allocated());
}
}
#[test]
fn test_u128_to_compact_string() {
let vals = [
u128::MIN,
1,
999,
123456789,
u128::MAX - 2,
u128::MAX - 1,
u128::MAX,
];
for x in &vals {
let c = x.to_compact_string();
let s = x.to_string();
assert_eq!(c, s);
}
}
#[test]
fn test_i128_to_compact_string() {
let vals = [
i128::MIN,
i128::MIN + 1,
i128::MIN + 2,
-22222222,
-42,
0,
1,
999,
123456789,
i128::MAX - 2,
i128::MAX - 1,
i128::MAX,
];
for x in &vals {
let c = x.to_compact_string();
let s = x.to_string();
assert_eq!(c, s);
}
}
#[test]
fn test_bool_to_compact_string() {
let c = true.to_compact_string();
let s = true.to_string();
assert_eq!("true", c);
assert_eq!(c, s);
assert!(!c.is_heap_allocated());
let c = false.to_compact_string();
let s = false.to_string();
assert_eq!("false", c);
assert_eq!(c, s);
assert!(!c.is_heap_allocated());
}
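// Helper macro: formats its arguments into a CompactString via
// core::format_args!, analogous to std::format!.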
macro_rules! format_compact {
( $fmt:expr $(, $args:tt)* ) => {
ToCompactString::to_compact_string(
&core::format_args!(
$fmt,
$(
$args,
)*
)
)
};
}
macro_rules! assert_int_MAX_to_compact_string {
($int: ty) => {
assert_eq!(&*<$int>::MAX.to_string(), &*<$int>::MAX.to_compact_string());
};
}
#[test]
fn test_to_compact_string() {
// Test specialisation for bool, char and String
assert_eq!(&*true.to_string(), "true".to_compact_string());
assert_eq!(&*false.to_string(), "false".to_compact_string());
assert_eq!("1", '1'.to_compact_string());
assert_eq!("2333", "2333".to_string().to_compact_string());
assert_eq!("2333", "2333".to_compact_string().to_compact_string());
// Test specialisation for int and nonzero_int using itoa
assert_eq!("234", 234.to_compact_string());
assert_eq!(
"234",
num::NonZeroU64::new(234).unwrap().to_compact_string()
);
assert_int_MAX_to_compact_string!(u8);
assert_int_MAX_to_compact_string!(i8);
assert_int_MAX_to_compact_string!(u16);
assert_int_MAX_to_compact_string!(i16);
assert_int_MAX_to_compact_string!(u32);
assert_int_MAX_to_compact_string!(i32);
assert_int_MAX_to_compact_string!(u64);
assert_int_MAX_to_compact_string!(i64);
assert_int_MAX_to_compact_string!(usize);
assert_int_MAX_to_compact_string!(isize);
// Test specialisation for f32 and f64 using ryu
// TODO: Fix bug in powerpc64, which is a little endian system
#[cfg(not(all(target_arch = "powerpc64", target_pointer_width = "64")))]
{
assert_eq!(
(&*3.2_f32.to_string(), &*288888.290028_f64.to_string()),
(
&*3.2_f32.to_compact_string(),
&*288888.290028_f64.to_compact_string()
)
);
assert_eq!("inf", f32::INFINITY.to_compact_string());
assert_eq!("-inf", f32::NEG_INFINITY.to_compact_string());
assert_eq!("inf", f64::INFINITY.to_compact_string());
assert_eq!("-inf", f64::NEG_INFINITY.to_compact_string());
assert_eq!("NaN", f32::NAN.to_compact_string());
assert_eq!("NaN", f64::NAN.to_compact_string());
}
// Test generic Display implementation
assert_eq!("234", "234".to_compact_string());
assert_eq!("12345", format_compact!("{}", "12345"));
assert_eq!("112345", format_compact!("1{}", "12345"));
assert_eq!("1123452", format_compact!("1{}{}", "12345", 2));
assert_eq!("11234522", format_compact!("1{}{}{}", "12345", 2, '2'));
assert_eq!(
"112345221000",
format_compact!("1{}{}{}{}", "12345", 2, '2', 1000)
);
// Test string longer than repr::MAX_SIZE
assert_eq!(
"01234567890123456789999999",
format_compact!("0{}67890123456789{}", "12345", 999999)
);
}
#[test]
fn test_into_string_large_string_with_excess_capacity() {
let mut string = String::with_capacity(128);
string.push_str("abcdefghijklmnopqrstuvwxyz");
let str_addr = string.as_ptr();
let str_len = string.len();
let str_cap = string.capacity();
let compact = CompactString::from(string);
let new_string = String::from(compact);
let new_str_addr = new_string.as_ptr();
let new_str_len = new_string.len();
let new_str_cap = new_string.capacity();
assert_eq!(str_addr, new_str_addr);
assert_eq!(str_len, new_str_len);
assert_eq!(str_cap, new_str_cap);
}
#[test]
fn test_into_string_where_32_bit_capacity_is_on_heap() {
const SIXTEEN_MB: usize = 16 * 1024 * 1024;
let buf = vec![b'a'; SIXTEEN_MB - 1];
// SAFETY: `buf` is filled with ASCII `a`s.
// This primarily speeds up miri, as we don't need to check every byte
// in the input buffer
let string = unsafe { String::from_utf8_unchecked(buf) };
let str_addr = string.as_ptr();
let str_len = string.len();
let str_cap = string.capacity();
let compact = CompactString::from(string);
let new_string = String::from(compact);
let new_str_addr = new_string.as_ptr();
let new_str_len = new_string.len();
let new_str_cap = new_string.capacity();
assert_eq!(str_len, new_str_len);
if cfg!(target_pointer_width = "64") {
assert_eq!(str_addr, new_str_addr);
assert_eq!(str_cap, new_str_cap);
} else {
assert_eq!(&new_string.as_bytes()[0..10], b"aaaaaaaaaa");
assert_eq!(str_len, new_str_cap);
}
}
#[test]
fn test_into_string_small_string_with_excess_capacity() {
let mut string = String::with_capacity(128);
string.push_str("abcdef");
let str_addr = string.as_ptr();
let str_len = string.len();
let str_cap = string.capacity();
let compact = CompactString::from(string);
let new_string = String::from(compact);
let new_str_addr = new_string.as_ptr();
let new_str_len = new_string.len();
let new_str_cap = new_string.capacity();
// If small boxed strings are eagerly compacted, the address and capacity assertions won't hold.
// Compaction is not eager, so these should hold.
assert_eq!(str_addr, new_str_addr);
assert_eq!(str_len, new_str_len);
assert_eq!(str_cap, new_str_cap);
}
#[test]
fn test_into_string_small_string_with_no_excess_capacity() {
let string = String::from("abcdef");
let str_addr = string.as_ptr();
let str_len = string.len();
let str_cap = string.capacity();
let compact = CompactString::from(string);
let new_string = String::from(compact);
let new_str_addr = new_string.as_ptr();
let new_str_len = new_string.len();
let new_str_cap = new_string.capacity();
// If small boxed strings are eagerly compacted, the address assertion won't hold.
// Compaction is not eager, so these should hold.
assert_eq!(str_addr, new_str_addr);
assert_eq!(str_len, new_str_len);
assert_eq!(str_cap, new_str_cap);
}
#[test]
fn test_into_string_empty_string() {
let string = String::new();
let str_addr = string.as_ptr();
let str_len = string.len();
let str_cap = string.capacity();
let compact = CompactString::from(string);
let new_string = String::from(compact);
let new_str_addr = new_string.as_ptr();
let new_str_len = new_string.len();
let new_str_cap = new_string.capacity();
assert_eq!(str_addr, new_str_addr);
assert_eq!(str_len, new_str_len);
assert_eq!(str_cap, new_str_cap);
}
#[test]
fn test_into_string_small_str() {
let data = "abcdef";
let str_addr = data.as_ptr();
let str_len = data.len();
let compact = CompactString::from(data);
let new_string = String::from(compact);
let new_str_addr = new_string.as_ptr();
let new_str_len = new_string.len();
let new_str_cap = new_string.capacity();
assert_ne!(str_addr, new_str_addr);
assert_eq!(str_len, new_str_len);
assert_eq!(str_len, new_str_cap);
}
#[test]
fn test_into_string_long_str() {
let data = "abcdefghijklmnopqrstuvwxyz";
let str_addr = data.as_ptr();
let str_len = data.len();
let compact = CompactString::from(data);
let new_string = String::from(compact);
let new_str_addr = new_string.as_ptr();
let new_str_len = new_string.len();
let new_str_cap = new_string.capacity();
assert_ne!(str_addr, new_str_addr);
assert_eq!(str_len, new_str_len);
assert_eq!(str_len, new_str_cap);
}
#[test]
fn test_into_string_empty_str() {
let data = "";
let str_len = data.len();
let compact = CompactString::from(data);
let new_string = String::from(compact);
let new_str_addr = new_string.as_ptr();
let new_str_len = new_string.len();
let new_str_cap = new_string.capacity();
assert_eq!(String::new().as_ptr(), new_str_addr);
assert_eq!(str_len, new_str_len);
assert_eq!(str_len, new_str_cap);
}
|
{
let compact: CompactString = word.clone().chars().collect();
prop_assert_eq!(&word, &compact)
}
|
endpoint-ellipses_test.go
|
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"reflect"
"testing"
"github.com/minio/minio/pkg/ellipses"
)
// Tests creating endpoints with and without ellipses.
func TestCreateServerEndpoints(t *testing.T) {
testCases := []struct {
serverAddr string
args []string
success bool
}{
// Invalid input.
{"", []string{}, false},
// Range cannot be negative.
{":9000", []string{"/export1{-1...1}"}, false},
// Range cannot start bigger than end.
{":9000", []string{"/export1{64...1}"}, false},
// Range can only be numeric.
{":9000", []string{"/export1{a...z}"}, false},
// Duplicate disks not allowed.
{":9000", []string{"/export1{1...32}", "/export1{1...32}"}, false},
// Same host cannot export same disk on two ports - special case localhost.
{":9001", []string{"http://localhost:900{1...2}/export{1...64}"}, false},
// Valid inputs.
{":9000", []string{"/export1"}, true},
{":9000", []string{"/export1", "/export2", "/export3", "/export4"}, true},
{":9000", []string{"/export1{1...64}"}, true},
{":9000", []string{"/export1{01...64}"}, true},
{":9000", []string{"/export1{1...32}", "/export1{33...64}"}, true},
{":9001", []string{"http://localhost:9001/export{1...64}"}, true},
{":9001", []string{"http://localhost:9001/export{01...64}"}, true},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
_, _, _, err := createServerEndpoints(testCase.serverAddr, testCase.args...)
if err != nil && testCase.success {
t.Errorf("Expected success but failed instead %s", err)
}
if err == nil && !testCase.success {
t.Errorf("Expected failure but passed instead")
}
})
}
}
func TestGetDivisibleSize(t *testing.T) {
testCases := []struct {
totalSizes []uint64
result uint64
}{{[]uint64{24, 32, 16}, 8},
{[]uint64{32, 8, 4}, 4},
{[]uint64{8, 8, 8}, 8},
{[]uint64{24}, 24},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotGCD := getDivisibleSize(testCase.totalSizes)
if testCase.result != gotGCD {
t.Errorf("Expected %v, got %v", testCase.result, gotGCD)
}
})
}
}
// Tests calculating set indexes with an ENV override for the drive count.
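// For example, an override of 8 splits the 64 drives of the first case below
// into 8 sets of 8 drives each.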
func TestGetSetIndexesEnvOverride(t *testing.T) {
testCases := []struct {
args []string
totalSizes []uint64
indexes [][]uint64
envOverride uint64
success bool
}{
{
[]string{"data{1...64}"},
[]uint64{64},
[][]uint64{{8, 8, 8, 8, 8, 8, 8, 8}},
8,
true,
},
{
[]string{"http://host{1...2}/data{1...180}"},
[]uint64{360},
[][]uint64{{15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}},
15,
true,
},
{
[]string{"http://host{1...12}/data{1...12}"},
[]uint64{144},
[][]uint64{{12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}},
12,
true,
},
{
[]string{"http://host{0...5}/data{1...28}"},
[]uint64{168},
[][]uint64{{12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}},
12,
true,
},
// Incorrect custom set drive count.
{
[]string{"http://host{0...5}/data{1...28}"},
[]uint64{168},
nil,
10,
false,
},
// Failure not divisible number of disks.
{
[]string{"http://host{1...11}/data{1...11}"},
[]uint64{121},
[][]uint64{{11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}},
11,
true,
},
{
[]string{"data{1...60}"},
nil,
nil,
8,
false,
},
{
[]string{"data{1...64}"},
nil,
nil,
64,
false,
},
{
[]string{"data{1...64}"},
nil,
nil,
2,
false,
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
var argPatterns = make([]ellipses.ArgPattern, len(testCase.args))
for i, arg := range testCase.args {
patterns, err := ellipses.FindEllipsesPatterns(arg)
if err != nil {
t.Fatalf("Unexpected failure %s", err)
}
argPatterns[i] = patterns
}
gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes, testCase.envOverride, argPatterns)
if err != nil && testCase.success {
t.Errorf("Expected success but failed instead %s", err)
}
if err == nil && !testCase.success
|
if !reflect.DeepEqual(testCase.indexes, gotIndexes) {
t.Errorf("Expected %v, got %v", testCase.indexes, gotIndexes)
}
})
}
}
// Tests calculating set indexes.
func TestGetSetIndexes(t *testing.T) {
testCases := []struct {
args []string
totalSizes []uint64
indexes [][]uint64
success bool
}{
// Invalid inputs.
{
[]string{"data{1...3}"},
[]uint64{3},
nil,
false,
},
{
[]string{"data/controller1/export{1...2}, data/controller2/export{1...4}, data/controller3/export{1...8}"},
[]uint64{2, 4, 8},
nil,
false,
},
// Valid inputs.
{
[]string{"data{1...27}"},
[]uint64{27},
[][]uint64{{9, 9, 9}},
true,
},
{
[]string{"http://host{1...3}/data{1...180}"},
[]uint64{540},
[][]uint64{{15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}},
true,
},
{
[]string{"http://host{1...2}.rack{1...4}/data{1...180}"},
[]uint64{1440},
[][]uint64{{16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16}},
true,
},
{
[]string{"http://host{1...2}/data{1...180}"},
[]uint64{360},
[][]uint64{{12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}},
true,
},
{
[]string{"data/controller1/export{1...4}, data/controller2/export{1...8}, data/controller3/export{1...12}"},
[]uint64{4, 8, 12},
[][]uint64{{4}, {4, 4}, {4, 4, 4}},
true,
},
{
[]string{"data{1...64}"},
[]uint64{64},
[][]uint64{{16, 16, 16, 16}},
true,
},
{
[]string{"data{1...24}"},
[]uint64{24},
[][]uint64{{12, 12}},
true,
},
{
[]string{"data/controller{1...11}/export{1...8}"},
[]uint64{88},
[][]uint64{{11, 11, 11, 11, 11, 11, 11, 11}},
true,
},
{
[]string{"data{1...4}"},
[]uint64{4},
[][]uint64{{4}},
true,
},
{
[]string{"data/controller1/export{1...10}, data/controller2/export{1...10}, data/controller3/export{1...10}"},
[]uint64{10, 10, 10},
[][]uint64{{10}, {10}, {10}},
true,
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
var argPatterns = make([]ellipses.ArgPattern, len(testCase.args))
for i, arg := range testCase.args {
patterns, err := ellipses.FindEllipsesPatterns(arg)
if err != nil {
t.Fatalf("Unexpected failure %s", err)
}
argPatterns[i] = patterns
}
gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes, 0, argPatterns)
if err != nil && testCase.success {
t.Errorf("Expected success but failed instead %s", err)
}
if err == nil && !testCase.success {
t.Errorf("Expected failure but passed instead")
}
if !reflect.DeepEqual(testCase.indexes, gotIndexes) {
t.Errorf("Expected %v, got %v", testCase.indexes, gotIndexes)
}
})
}
}
func getHexSequences(start int, number int, paddinglen int) (seq []string) {
for i := start; i <= number; i++ {
if paddinglen == 0 {
seq = append(seq, fmt.Sprintf("%x", i))
} else {
seq = append(seq, fmt.Sprintf(fmt.Sprintf("%%0%dx", paddinglen), i))
}
}
return seq
}
func getSequences(start int, number int, paddinglen int) (seq []string) {
for i := start; i <= number; i++ {
if paddinglen == 0 {
seq = append(seq, fmt.Sprintf("%d", i))
} else {
seq = append(seq, fmt.Sprintf(fmt.Sprintf("%%0%dd", paddinglen), i))
}
}
return seq
}
// Tests parsing of endpoint ellipses input patterns.
func TestParseEndpointSet(t *testing.T) {
testCases := []struct {
arg string
es endpointSet
success bool
}{
// Tests invalid inputs.
{
"...",
endpointSet{},
false,
},
// No range specified.
{
"{...}",
endpointSet{},
false,
},
// Invalid range.
{
"http://minio{2...3}/export/set{1...0}",
endpointSet{},
false,
},
// Range cannot be smaller than 4 minimum.
{
"/export{1..2}",
endpointSet{},
false,
},
// Unsupported characters.
{
"/export/test{1...2O}",
endpointSet{},
false,
},
// Tests valid inputs.
{
"{1...27}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "",
Seq: getSequences(1, 27, 0),
},
},
},
nil,
[][]uint64{{9, 9, 9}},
},
true,
},
{
"/export/set{1...64}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "/export/set",
Suffix: "",
Seq: getSequences(1, 64, 0),
},
},
},
nil,
[][]uint64{{16, 16, 16, 16}},
},
true,
},
// Valid input for distributed setup.
{
"http://minio{2...3}/export/set{1...64}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "",
Seq: getSequences(1, 64, 0),
},
{
Prefix: "http://minio",
Suffix: "/export/set",
Seq: getSequences(2, 3, 0),
},
},
},
nil,
[][]uint64{{16, 16, 16, 16, 16, 16, 16, 16}},
},
true,
},
// Supporting some advanced cases.
{
"http://minio{1...64}.mydomain.net/data",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "http://minio",
Suffix: ".mydomain.net/data",
Seq: getSequences(1, 64, 0),
},
},
},
nil,
[][]uint64{{16, 16, 16, 16}},
},
true,
},
{
"http://rack{1...4}.mydomain.minio{1...16}/data",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "/data",
Seq: getSequences(1, 16, 0),
},
{
Prefix: "http://rack",
Suffix: ".mydomain.minio",
Seq: getSequences(1, 4, 0),
},
},
},
nil,
[][]uint64{{16, 16, 16, 16}},
},
true,
},
// Supporting kubernetes cases.
{
"http://minio{0...15}.mydomain.net/data{0...1}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "",
Seq: getSequences(0, 1, 0),
},
{
Prefix: "http://minio",
Suffix: ".mydomain.net/data",
Seq: getSequences(0, 15, 0),
},
},
},
nil,
[][]uint64{{16, 16}},
},
true,
},
// No host regex, just disks.
{
"http://server1/data{1...32}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "http://server1/data",
Suffix: "",
Seq: getSequences(1, 32, 0),
},
},
},
nil,
[][]uint64{{16, 16}},
},
true,
},
// No host regex, just disks with two position numerics.
{
"http://server1/data{01...32}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "http://server1/data",
Suffix: "",
Seq: getSequences(1, 32, 2),
},
},
},
nil,
[][]uint64{{16, 16}},
},
true,
},
// More than 2 ellipses are supported as well.
{
"http://minio{2...3}/export/set{1...64}/test{1...2}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "",
Seq: getSequences(1, 2, 0),
},
{
Prefix: "",
Suffix: "/test",
Seq: getSequences(1, 64, 0),
},
{
Prefix: "http://minio",
Suffix: "/export/set",
Seq: getSequences(2, 3, 0),
},
},
},
nil,
[][]uint64{{16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16}},
},
true,
},
// More than 1 ellipses per argument for standalone setup.
{
"/export{1...10}/disk{1...10}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "",
Seq: getSequences(1, 10, 0),
},
{
Prefix: "/export",
Suffix: "/disk",
Seq: getSequences(1, 10, 0),
},
},
},
nil,
[][]uint64{{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}},
},
true,
},
// IPv6 ellipses with hexadecimal expansion
{
"http://[2001:3984:3989::{1...a}]/disk{1...10}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "",
Seq: getSequences(1, 10, 0),
},
{
Prefix: "http://[2001:3984:3989::",
Suffix: "]/disk",
Seq: getHexSequences(1, 10, 0),
},
},
},
nil,
[][]uint64{{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}},
},
true,
},
// IPv6 ellipses with hexadecimal expansion with 3 position numerics.
{
"http://[2001:3984:3989::{001...00a}]/disk{1...10}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "",
Seq: getSequences(1, 10, 0),
},
{
Prefix: "http://[2001:3984:3989::",
Suffix: "]/disk",
Seq: getHexSequences(1, 10, 3),
},
},
},
nil,
[][]uint64{{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}},
},
true,
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotEs, err := parseEndpointSet(0, testCase.arg)
if err != nil && testCase.success {
t.Errorf("Expected success but failed instead %s", err)
}
if err == nil && !testCase.success {
t.Errorf("Expected failure but passed instead")
}
if !reflect.DeepEqual(testCase.es, gotEs) {
t.Errorf("Expected %v, got %v", testCase.es, gotEs)
}
})
}
}
|
{
t.Errorf("Expected failure but passed instead")
}
|
cbre_net.py
|
import tensorflow as tf
import numpy as np
from cbre.util import *
class CBRENet(object):
"""
CBRENet implements cycle-balanced representation learning for counterfactual inference.
The network is implemented as a tensorflow graph. The class constructor
creates an object containing relevant TF nodes as member variables.
"""
def __init__(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):
"""
x The variables of data
t The treatment applied to x, t.shape[1]==1
y_ The true outcome
p_t The treatment probability in all observations
z_norm todo unknown
flags The arg params
r_alpha The coefficient of reconstruction and cycle loss
r_lambda The coefficient of regularization of prediction network
r_beta The coefficient of gradient penalty in GAN
do_in The val of dropout_in
do_out The val of dropout_out
data_x_dim The dim of varibale x
"""
self.variables = {}
# wd_loss: regularization l2 loss
self.wd_loss = 0
if flags.nonlin.lower() == 'elu':
self.nonlin = tf.nn.elu
else:
self.nonlin = tf.nn.relu
self._build_graph(x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim)
def _add_variable(self, var, name):
"""
Adds variables to the internal track-keeper
"""
basename = name
i = 0
while name in self.variables:
name = '%s_%d' % (basename, i) # @TODO: not consistent with TF internally if changed
i += 1
self.variables[name] = var
def _create_variable(self, var, name):
""" Create and adds variables to the internal track-keeper """
# tf.get_variable(name=name, initializer=var)
var = tf.Variable(var, name=name)
self._add_variable(var, name)
return var
def _create_variable_with_weight_decay(self, initializer, name, wd):
""" Create and adds variables to the internal track-keeper
and adds it to the list of weight decayed variables """
var = self._create_variable(initializer, name)
self.wd_loss += wd * tf.nn.l2_loss(var)
return var
def _build_graph(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):
"""
Constructs a TensorFlow subgraph for causal effect inference.
Sets the following member variables (to TF nodes):
self.output The output prediction "y"
self.tot_loss The total objective to minimize
self.pred_loss The prediction term of the objective
self.weights_in The input/representation layer weights
self.weights_out The output/post-representation layer weights
self.weights_pred The (linear) prediction layer weights
self.h_rep The layer of the penalized representation
"""
self.x = x
self.t = t
self.y_ = y_
self.p_t = p_t
self.r_alpha = r_alpha
self.r_lambda = r_lambda
self.r_beta = r_beta
self.do_in = do_in
self.do_out = do_out
self.z_norm = z_norm
self.encoder_dim = flags.encoder_dim
encoder_dim = flags.encoder_dim
self.decoder_dim = flags.decoder_dim
self.predictor_dim = flags.predictor_dim
predictor_dim = flags.predictor_dim
mi_estimator_dim = flags.mi_estimator_dim
self.discriminator_dim = flags.discriminator_dim
discriminator_dim = flags.discriminator_dim
"""
Network Components
"""
'''
1. Encoder Network
'''
# Construct Encoder network layers, four layers with size 200
h_rep, h_rep_norm, weights_in = self._build_encoder(x, data_x_dim, flags)
'''
2. GAN
'''
d0, d1, dp, weights_dis, weights_discore = self._build_adversarial_graph(h_rep_norm, t, encoder_dim,
discriminator_dim, do_out,
flags)
# discriminator
# with sigmoid
# discriminator_loss = tf.reduce_mean(tf.nn.softplus(-d0)) + tf.reduce_mean(tf.nn.softplus(-d1) + d1) + dp
# without sigmoid
discriminator_loss = -tf.reduce_mean(d0) + tf.reduce_mean(d1) + r_beta * dp
# encoder
# with sigmoid
# rep_loss = tf.reduce_mean(tf.nn.softplus(-d1))
# without sigmoid
# todo rep_loss in paper: rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)
rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)
# rep_loss = -tf.reduce_mean(d1)
'''
3. Reconstruction
'''
# graph for reconstruction loss
x0, recons_x_0, x1, recons_x_1 = self._build_reconstruct_graph(x, t, data_x_dim, flags)
recons_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - recons_x_0)) + 1.0e-12) + tf.sqrt(
tf.reduce_mean(tf.square(x1 - recons_x_1)) + 1.0e-12)
'''
4. Cycle
'''
x0, cycle_x0, x1, cycle_x1 = self._build_cycle_graph(x, t, data_x_dim, flags)
cycle_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - cycle_x0)) + 1.0e-12) + tf.sqrt(
tf.reduce_mean(tf.square(x1 - cycle_x1)) + 1.0e-12)
'''
Predict Networks
'''
y, weights_out, weights_pred = self._build_output_graph(h_rep_norm, t, encoder_dim, predictor_dim, do_out,
flags)
""" Compute sample reweighting """
if flags.reweight_sample:
w_t = t / (2 * p_t)
w_c = (1 - t) / (2 * (1 - p_t))
sample_weight = w_t + w_c
else:
sample_weight = 1.0
self.sample_weight = sample_weight
risk = tf.reduce_mean(sample_weight * tf.square(y_ - y))
pred_error = tf.sqrt(tf.reduce_mean(tf.square(y_ - y)) + 1.0e-12)
""" Regularization """
if flags.p_lambda > 0 and flags.rep_weight_decay:
for i in range(0, flags.layer_num_encoder):
if not (flags.varsel and i == 0): # No penalty on W in variable selection
self.wd_loss += tf.nn.l2_loss(weights_in[i])
""" Total error """
tot_error = risk
if flags.p_lambda > 0:
tot_error = tot_error + r_lambda * self.wd_loss + recons_loss + cycle_loss
if flags.coef_recons > 0:
tot_error += flags.coef_recons * recons_loss
if flags.coef_cycle:
tot_error += flags.coef_cycle * cycle_loss
if flags.coef_d:
tot_error += flags.coef_d * discriminator_loss
if flags.varsel:
self.w_proj = tf.placeholder("float", shape=[data_x_dim], name='w_proj')
self.projection = weights_in[0].assign(self.w_proj)
self.output = y
self.tot_loss = tot_error
self.discriminator_loss = discriminator_loss
self.rep_loss = rep_loss
self.rec_loss = recons_loss
|
self.pred_loss = pred_error
self.weights_in = weights_in
self.weights_out = weights_out
self.weights_dis = weights_dis
self.weights_discore = weights_discore
self.weights_pred = weights_pred
self.h_rep = h_rep
self.h_rep_norm = h_rep_norm
self.dp = dp
def _build_output_0(self, h_input, encoder_dim, predictor_dim, do_out, flags):
h_out = [h_input]
dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)
with tf.variable_scope('pred_0') as scope:
weights_out = []
biases_out = []
for i in range(0, flags.layer_num_predictor):
wo = tf.get_variable(name='w_{}'.format(i),
initializer=tf.random_normal([dims[i], dims[i + 1]],
stddev=flags.weight_init / np.sqrt(dims[i])))
weights_out.append(wo)
# biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))
z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]
h_out.append(self.nonlin(z))
h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)
weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(predictor_dim)),
'w_pred')
weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(
predictor_dim)))
bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')
if flags.varsel or flags.layer_num_predictor == 0:
self.wd_loss += tf.nn.l2_loss(
tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient
else:
self.wd_loss += tf.nn.l2_loss(weights_pred)
""" Construct linear classifier """
h_pred = h_out[-1]
y = tf.matmul(h_pred, weights_pred) + bias_pred
return y, weights_out, weights_pred
def _build_output_1(self, h_input, encoder_dim, predictor_dim, do_out, flags):
h_out = [h_input]
dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)
with tf.variable_scope('pred_1') as scope:
weights_out = []
biases_out = []
for i in range(0, flags.layer_num_predictor):
wo = tf.get_variable(name='w_{}'.format(i),
initializer=tf.random_normal([dims[i], dims[i + 1]],
stddev=flags.weight_init / np.sqrt(dims[i])))
weights_out.append(wo)
# biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))
z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]
h_out.append(self.nonlin(z))
h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)
weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(predictor_dim)),
'w_pred')
weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(
predictor_dim)))
bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')
if flags.varsel or flags.layer_num_predictor == 0:
self.wd_loss += tf.nn.l2_loss(
tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient
else:
self.wd_loss += tf.nn.l2_loss(weights_pred)
""" Construct linear classifier """
h_pred = h_out[-1]
y = tf.matmul(h_pred, weights_pred) + bias_pred
return y, weights_out, weights_pred
def _build_output_graph(self, rep, t, encoder_dim, predictor_dim, do_out, flags):
""" Construct output/regression layers """
if flags.split_output:
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
rep0 = tf.gather(rep, i0)
rep1 = tf.gather(rep, i1)
y0, weights_out0, weights_pred0 = self._build_output_0(rep0, encoder_dim, predictor_dim, do_out, flags)
y1, weights_out1, weights_pred1 = self._build_output_1(rep1, encoder_dim, predictor_dim, do_out, flags)
y = tf.dynamic_stitch([i0, i1], [y0, y1])
weights_out = weights_out0 + weights_out1
weights_pred = weights_pred0 + weights_pred1
else:
h_input = tf.concat([rep, t], 1)
# y, weights_out, weights_pred = self._build_output(h_input, encoder_dim + 1, predictor_dim, do_out, flags)
y, weights_out, weights_pred = None, None, None
return y, weights_out, weights_pred
def _build_encoder(self, x, data_x_dim, flags):
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE) as scope:
weights_in = []
biases_in = []
if flags.batch_norm:
bn_biases = []
bn_scales = []
h_in = [x]
for i in range(0, flags.layer_num_encoder):
if i == 0:
""" If using variable selection, first layer is just rescaling"""
if flags.varsel:
weights_in.append(tf.get_variable(name='wg_{}'.format(i),
initializer=1.0 / data_x_dim * tf.ones([data_x_dim])))
else:
wg = tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([data_x_dim, self.encoder_dim],
stddev=flags.weight_init / np.sqrt(
data_x_dim)))
weights_in.append(wg)
else:
wg = tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([self.encoder_dim, self.encoder_dim],
stddev=flags.weight_init / np.sqrt(
self.encoder_dim)))
weights_in.append(wg)
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, self.encoder_dim])))
# z equals outcome of each layer in Encoder Network.
z = tf.matmul(h_in[i], weights_in[i]) + biases_in[i]
if flags.batch_norm:
batch_mean, batch_var = tf.nn.moments(z, [0])
if flags.normalization == 'bn_fixed':
z = tf.nn.batch_normalization(z, batch_mean, batch_var, 0, 1, 1e-3)
else:
# bn_biases.append(tf.Variable(tf.zeros([self.encoder_dim])))
bn_biases.append(
tf.get_variable(name='bn_b_{}'.format(i), initializer=tf.zeros([self.encoder_dim])))
# bn_scales.append(tf.Variable(tf.ones([self.encoder_dim])))
bn_scales.append(
tf.get_variable(name='bn_s_{}'.format(i), initializer=tf.ones([self.encoder_dim])))
z = tf.nn.batch_normalization(z, batch_mean, batch_var, bn_biases[-1], bn_scales[-1], 1e-3)
h_in.append(self.nonlin(z))
h_in[i + 1] = tf.nn.dropout(h_in[i + 1], self.do_in)
h_rep = h_in[-1]
# todo normalization meaning?
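# 'divide' rescales each representation row to unit L2 norm; any other setting
# leaves the representation unscaled.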
if flags.normalization == 'divide':
h_rep_norm = h_rep / safe_sqrt(tf.reduce_sum(tf.square(h_rep), axis=1, keep_dims=True) + 1.0e-12)
else:
h_rep_norm = 1.0 * h_rep
return h_rep, h_rep_norm, weights_in
def _build_decoder(self, h_rep, data_x_dim, flags, suffix='0'):
with tf.variable_scope('decoder_' + suffix, reuse=tf.AUTO_REUSE) as scope:
weights_in = []
biases_in = []
recons_x = [h_rep]
decoder_dim = flags.decoder_dim
for i in range(0, flags.layer_num_decoder):
if i == 0:
weights_in.append(tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([flags.encoder_dim, decoder_dim],
stddev=flags.weight_init / np.sqrt(
flags.encoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))
elif i == flags.layer_num_decoder - 1:
weights_in.append(
tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, data_x_dim],
stddev=flags.weight_init / np.sqrt(
decoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, data_x_dim])))
else:
weights_in.append(
tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, decoder_dim],
stddev=flags.weight_init / np.sqrt(
decoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))
# z equals outcome of each layer in Encoder Network.
z = tf.matmul(recons_x[i], weights_in[i]) + biases_in[i]
recons_x.append(self.nonlin(z))
recons_x[i + 1] = tf.nn.dropout(recons_x[i + 1], self.do_in)
recons_x = recons_x[-1]
return recons_x, weights_in
def _build_discriminator_graph_mine(self, x, hrep, data_x_dim, encoder_dim, mi_estimator_dim, flags):
""" Construct MI estimation layers """
# two layers with size 200
with tf.variable_scope('gmi') as scope:
input_num = tf.shape(x)[0]
x_shuffle = tf.random_shuffle(x)
x_conc = tf.concat([x, x_shuffle], axis=0)
y_conc = tf.concat([hrep, hrep], axis=0)
# forward
# [25, 200]
weights_mi_x = self._create_variable(tf.random_normal([data_x_dim, mi_estimator_dim],
stddev=flags.weight_init / np.sqrt(data_x_dim)),
'weights_mi_x')
biases_mi_x = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_x')
# [, 200]
lin_x = tf.matmul(x_conc, weights_mi_x) + biases_mi_x
# [200, 200]
weights_mi_y = self._create_variable(tf.random_normal([encoder_dim, mi_estimator_dim],
stddev=flags.weight_init / np.sqrt(encoder_dim)),
'weights_mi_y')
biases_mi_y = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_y')
# [, 200]
lin_y = tf.matmul(y_conc, weights_mi_y) + biases_mi_y
# lin_conc = tf.nn.relu(lin_x + lin_y)
lin_conc = self.nonlin(lin_x + lin_y)
weights_mi_pred = self._create_variable(tf.random_normal([mi_estimator_dim, 1],
stddev=flags.weight_init / np.sqrt(
mi_estimator_dim)),
'gmi_p')
biases_mi_pred = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_pred')
gmi_output = tf.matmul(lin_conc, weights_mi_pred) + biases_mi_pred
# real estimator outcome: shape=[input_num, 1]
real_estimate = gmi_output[:input_num]
# fake estimator outcome: shape=[input_num, 1]
fake_estimate = gmi_output[input_num:]
return real_estimate, fake_estimate, weights_mi_x, weights_mi_y, weights_mi_pred
def _build_discriminator_adversarial(self, hrep, encoder_dim, discriminator_dim, do_out, flags):
""" Construct adversarial discriminator layers """
with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as scope:
h_dis = [hrep]
weights_dis = []
biases_dis = []
for i in range(0, flags.layer_num_discriminator):
if i == 0:
weights_dis.append(tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([encoder_dim, discriminator_dim],
stddev=flags.weight_init / np.sqrt(
encoder_dim))))
else:
weights_dis.append(tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal(
[discriminator_dim, discriminator_dim],
stddev=flags.weight_init / np.sqrt(
discriminator_dim))))
biases_dis.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, discriminator_dim])))
z = tf.matmul(h_dis[i], weights_dis[i]) + biases_dis[i]
h_dis.append(self.nonlin(z))
h_dis[i + 1] = tf.nn.dropout(h_dis[i + 1], do_out)
weights_discore = tf.get_variable(initializer=tf.random_normal([discriminator_dim, 1],
stddev=flags.weight_init / np.sqrt(
discriminator_dim)), name='dc_p')
bias_dc = tf.get_variable(initializer=tf.zeros([1]), name='dc_b_p')
h_score = h_dis[-1]
dis_score = tf.matmul(h_score, weights_discore) + bias_dc
return dis_score, weights_dis, weights_discore
def _build_adversarial_graph(self, rep, t, encoder_dim, discriminator_dim, do_out, flags):
"""
Construct adversarial discriminator
"""
# three layers with size 200
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
rep0 = tf.gather(rep, i0)
rep1 = tf.gather(rep, i1)
z_rep0 = tf.reduce_max(rep0, axis=0, keep_dims=True)
z_rep1 = tf.reduce_max(rep1, axis=0, keep_dims=True)
z_rep0_conc = tf.concat([z_rep0, self.z_norm], axis=1)
z_rep1_conc = tf.concat([z_rep1, self.z_norm], axis=1)
d0, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep0_conc, encoder_dim + encoder_dim,
discriminator_dim,
do_out, flags)
d1, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep1_conc, encoder_dim + encoder_dim,
discriminator_dim,
do_out, flags)
# gradient penalty
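# WGAN-GP style penalty: evaluate the discriminator on a random interpolation
# between the treated and control representation summaries and penalize the
# squared deviation of its gradient norm from 1.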
alpha_dist = tf.contrib.distributions.Uniform(low=0., high=1.)
alpha = alpha_dist.sample((1, 1))
interpolated = z_rep1 + alpha * (z_rep0 - z_rep1)
interpolated_conc = tf.concat([interpolated, self.z_norm], axis=1)
inte_logit, weights_dis, weights_discore = self._build_discriminator_adversarial(interpolated_conc,
encoder_dim + encoder_dim,
discriminator_dim, do_out,
flags)
gradients = tf.gradients(inte_logit, [interpolated])[0]
grad_l2 = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1]) + 1.0e-12)
gradient_penalty = tf.reduce_mean(tf.square(grad_l2 - 1.0))
return d0, d1, gradient_penalty, weights_dis, weights_discore
def _build_reconstruct_graph(self, x, t, data_x_dim, flags):
""" construct graph for later computing reconstruction loss easily
Parameters:
x The variables of data
t The treatment applied to x
Returns:
x0 x[t=0]
reconstruct_x The reconstruction of x after passing through the encoder and decoder networks
"""
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
x0 = tf.gather(x, i0)
x1 = tf.gather(x, i1)
h_rep_0, h_rep_norm_0, weights_in_0 = self._build_encoder(x0, data_x_dim, flags)
h_rep_1, h_rep_norm_1, weights_in_1 = self._build_encoder(x1, data_x_dim, flags)
recons_x_0, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='0')
recons_x_1, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='1')
return x0, recons_x_0, x1, recons_x_1
def _build_cycle_graph(self, x, t, data_x_dim, flags):
""" construct graph for later computing cycle loss easily
Parameters:
x The variables of data
t The treatment applied to x
Returns:
x0 x[t=0]
reconstruct_x The reconstruction of x after passing through the encoder and decoder networks
"""
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
x0 = tf.gather(x, i0)
x1 = tf.gather(x, i1)
# cycle x0-x1'-x0
_, h_rep_norm_0, _ = self._build_encoder(x0, data_x_dim, flags)
temp_x_0_in_1, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='1')
_, cyc_h_rep_norm_0, _ = self._build_encoder(temp_x_0_in_1, data_x_dim, flags)
cycle_x0, _ = self._build_decoder(cyc_h_rep_norm_0, data_x_dim, flags, suffix='0')
# cycle x1-x0'-x1
_, h_rep_norm_1, _ = self._build_encoder(x1, data_x_dim, flags)
temp_x_1_in_0, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='0')
_, cyc_h_rep_norm_1, _ = self._build_encoder(temp_x_1_in_0, data_x_dim, flags)
cycle_x1, _ = self._build_decoder(cyc_h_rep_norm_1, data_x_dim, flags, suffix='1')
return x0, cycle_x0, x1, cycle_x1
|
self.cycle_loss = cycle_loss
self.recons_cycle_loss = recons_loss + cycle_loss
|
views.py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class
|
(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Returns a list of APIView features"""
an_apiview = [
'Uses HTTP methods as functions (get, post, put, patch, delete)',
'Is similar to a traditional django view',
'Gives you the most control over your application logic',
'Is mapped manually to URLs'
]
return Response({'message':'Hello', 'an_apiview':an_apiview})
def post(self, request):
"""Create a hello message with our name"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message':message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
"""Handle updating an object"""
return Response({'method':'put'})
def patch(self, request, pk=None):
"""Handle a partial update of an object"""
return Response({'method':'patch'})
def delete(self, request, pk=None):
"""Handle deleting an object"""
return Response({'method':'delete'})
class HelloViewSet(viewsets.ViewSet):
"""Test API ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""Return a hello message"""
a_viewset = [
'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URLs using routers',
'Provides more functionalities with less code'
]
return Response({'message':'hello', 'a_viewset':a_viewset})
def create(self, request):
"""Create a new hello message"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message':message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
"""Handle getting an object by ID"""
return Response({'method':'GET'})
def update(self, request, pk=None):
"""Handle updating an object"""
return Response({'method':'PUT'})
def partial_update(self, request, pk=None):
"""Handle updating part an object"""
return Response({'method':'PATCH'})
def destroy(self, request, pk=None):
"""Handle removing an object"""
return Response({'method':'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handle creating and updating profiles"""
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name','email',)
class UserLoginApiView(ObtainAuthToken):
"""Handle user authentication and auth tokens"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handle creating, updating of profile feeds"""
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (
permissions.UpdateOwnStatus,
IsAuthenticatedOrReadOnly,
IsAuthenticated,
)
# IsAuthenticated makes the viewset available only to logged-in users, whether or not the request uses a safe method
def perform_create(self, serializer):
"""Sets user profile to logged in user"""
serializer.save(user_profile=self.request.user)
|
HelloApiView
|
main.rs
|
//! Substrate Node Template CLI library.
#![warn(missing_docs)]
#![warn(unused_extern_crates)]
extern crate futures;
#[macro_use]
extern crate error_chain;
extern crate tokio;
#[macro_use]
extern crate log;
extern crate substrate_cli;
extern crate substrate_primitives as primitives;
extern crate substrate_consensus_aura as consensus;
extern crate substrate_client as client;
#[macro_use]
extern crate substrate_network as network;
#[macro_use]
extern crate substrate_executor;
extern crate substrate_transaction_pool as transaction_pool;
#[macro_use]
extern crate substrate_service;
extern crate template_node_runtime;
#[macro_use]
extern crate structopt;
extern crate node_executor;
extern crate sr_primitives as runtime_primitives;
extern crate substrate_finality_grandpa as grandpa;
mod chain_spec;
mod service;
mod cli;
pub use substrate_cli::{VersionInfo, IntoExit, error};
fn run() -> cli::error::Result<()>
|
quick_main!(run);
|
{
let version = VersionInfo {
commit: env!("VERGEN_SHA_SHORT"),
version: env!("CARGO_PKG_VERSION"),
executable_name: "template-node",
author: "Anonymous",
description: "Template Node",
};
cli::run(::std::env::args(), cli::Exit, version)
}
|
throttle.ts
|
export const THROTTLE_MS = 500;
|
let lastCall: number = null;
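// Leading-edge throttle: the first call runs immediately; subsequent calls are
// dropped until throttleMs has elapsed since the last accepted call.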
return function (...args: any[]) {
const now = Date.now();
if (lastCall === null || (now - lastCall > throttleMs)) {
fn.apply(this, args);
lastCall = now;
}
};
}
|
// NOTE(lmr): this is a huge hack right now, and prevents anything from being clickable more than
// twice per second, but the alternative is so bad right now. Need to figure out how to fix the
// responder plugin later and fix this.
export function throttle(fn: Function, throttleMs = THROTTLE_MS) {
|
sequence_statements.rs
|
fn
|
() {
print!("{} + ", 80);print!("{} = ", 34);
print ! ( "{}" ,
80 + 34) ;
}
// Rust ignores blanks, tabs and line breaks. However, Rust programmers have the following
// recommended habits:
// 1 - to indent line by four spaces inside functions
// 2 - to avoid adding several consecutive spaces inside statements
// 3 - to avoid exceeding 80 columns, possibly splitting long statements on several lines
|
main
|
cache_test.go
|
package cache
import (
"fmt"
"github.com/stretchr/testify/assert"
"testing"
"time"
)
var (
interval = 1 * time.Second
keyPattern = "key-%d"
valuePattern = "value-%d"
)
func key(i int) string {
return fmt.Sprintf(keyPattern, i)
}
func value(i int) string {
return fmt.Sprintf(valuePattern, i)
}
func
|
(t *testing.T) {
cache := NewCache(interval)
for i := 0; i < 1000; i++ {
cache.Set(key(i), value(i), interval)
}
for i := 0; i < 1000; i++ {
v, ok := cache.Get(key(i))
if assert.True(t, ok) {
val := v.(string)
assert.Equal(t, value(i), val)
}
}
}
func TestGetExpired(t *testing.T) {
cache := NewCache(interval)
cache.Set(key(10), value(20), interval)
time.Sleep(2 * time.Second)
_, ok := cache.Get(key(10))
assert.False(t, ok)
}
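// TestGetRefresh verifies that a successful Get resets an entry's expiration
// timer, so repeated reads within the interval keep the entry alive.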
func TestGetRefresh(t *testing.T) {
cache := NewCache(interval)
cache.Set(key(10), value(20), interval)
// not expired
time.Sleep(500 * time.Millisecond)
// refresh time
v, ok := cache.Get(key(10))
if assert.True(t, ok) {
val := v.(string)
assert.Equal(t, value(20), val)
}
// not expired
time.Sleep(800 * time.Millisecond)
v, ok = cache.Get(key(10))
if assert.True(t, ok) {
val := v.(string)
assert.Equal(t, value(20), val)
}
// expired
time.Sleep(2 * time.Second)
_, ok = cache.Get(key(10))
assert.False(t, ok)
}
|
TestGetAndSet
|
main.rs
|
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use nalgebra::Matrix5;
fn read_to_string(filename: &str) -> String {
// Create a path to the desired file
let path = Path::new(filename);
let display = path.display();
// Open the path in read-only mode, returns `io::Result<File>`
let mut file = match File::open(&path) {
Err(why) => panic!("couldn't open {}: {}", display, why),
Ok(file) => file,
};
// Read the file contents into a string, returns `io::Result<usize>`
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => panic!("couldn't read {}: {}", display, why),
Ok(_) => print!("{} contains: {} lines\n", display, s.lines().count()),
}
return s;
}
type Board = Matrix5<(u32, bool)>;
fn parse_input(input: &str) -> (Vec<u32>, Vec<Board>) {
let mut lines = input.split("\n\n");
let numbers = lines
.next()
.unwrap()
.split(',')
.filter_map(|x| x.parse::<u32>().ok())
.collect::<Vec<_>>();
let boards = lines
.map(|board| {
Matrix5::from_iterator(board.lines().flat_map(|line| {
line.split_whitespace()
.filter_map(|x| x.parse::<u32>().ok())
.map(|x| (x, false))
}))
})
.collect::<Vec<Board>>();
return (numbers, boards);
}
fn update_board(board: &mut Board, num: u32) {
board.iter_mut().for_each(|x| {
if x.0 == num {
x.1 = true;
}
});
}
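// A board wins once every cell in any single row or column has been marked.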
fn check_win(board: &Board) -> bool {
let column_won = board
.column_iter()
.any(|col| col.iter().all(|(_, marked)| *marked));
let row_won = board
.row_iter()
.any(|row| row.iter().all(|(_, marked)| *marked));
row_won || column_won
}
fn calc_unmarked_sum(board: &Board) -> u32 {
board
.iter()
.map(|(x, marked)| if !marked { *x } else { 0 })
.sum()
}
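// Part 1: draw numbers in order; the answer is the last drawn number times the
// sum of unmarked cells on the first board to win.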
fn part1(input: (Vec<u32>, Vec<Board>)) -> u32 {
let (numbers, mut boards) = input;
for num in numbers {
for board in boards.iter_mut() {
update_board(board, num);
if check_win(&board) {
return num * calc_unmarked_sum(&board);
}
}
}
unreachable!()
}
fn
|
(input: (Vec<u32>, Vec<Board>)) -> u32 {
let (numbers, mut boards) = input;
let mut boards_won = vec![false; boards.len()];
for num in numbers {
for (i, board) in boards.iter_mut().enumerate() {
if boards_won[i] {
continue;
}
update_board(board, num);
if check_win(&board) {
boards_won[i] = true;
if boards_won.iter().all(|x| *x) {
return num * calc_unmarked_sum(&board);
}
}
}
}
unreachable!()
}
fn main() {
let test_input = &read_to_string("test_input.txt")[..];
let parsed_test_input = parse_input(test_input);
println!("test part1: {}", part1(parsed_test_input.clone()));
println!("test part2: {}", part2(parsed_test_input.clone()));
let input = &read_to_string("input.txt")[..];
let parsed_input = parse_input(input);
println!("part1_1: {}", part1(parsed_input.clone()));
println!("part2_1: {}", part2(parsed_input.clone()));
}
|
part2
|
gn-create-xr-ip-domain-cfg-33-ydk.py
|
#!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model Cisco-IOS-XR-ip-domain-cfg.
usage: gn-create-xr-ip-domain-cfg-33-ydk.py [-h] [-v] device
positional arguments:
device gNMI device (http://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.path import Repository
from ydk.services import CRUDService
from ydk.gnmi.providers import gNMIServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ip_domain_cfg \
as xr_ip_domain_cfg
import os
import logging
YDK_REPO_DIR = os.path.expanduser("~/.ydk/")
def
|
(ip_domain):
"""Add config data to ip_domain object."""
vrf = ip_domain.vrfs.Vrf()
vrf.vrf_name = "RED"
vrf.name = "red.example"
# first name server
server = vrf.servers.Server()
server.order = 0
server.server_address = "2001:db8:800a::1"
vrf.servers.server.append(server)
# second name server
server = vrf.servers.Server()
server.order = 1
server.server_address = "2001:db8:800a::2"
vrf.servers.server.append(server)
# third name server
server = vrf.servers.Server()
server.order = 2
server.server_address = "2001:db8:800a::3"
vrf.servers.server.append(server)
ip_domain.vrfs.vrf.append(vrf)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="gNMI device (http://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create gNMI provider
repository = Repository(YDK_REPO_DIR+device.hostname)
provider = gNMIServiceProvider(repo=repository,
address=device.hostname,
port=device.port,
username=device.username,
password=device.password)
# create CRUD service
crud = CRUDService()
ip_domain = xr_ip_domain_cfg.IpDomain() # create object
config_ip_domain(ip_domain) # add object configuration
# create configuration on gNMI device
crud.create(provider, ip_domain)
exit()
# End of script
|
config_ip_domain
|
Sum Square Difference.py
|
import math
sum_squared = 0
squared_sum = 0
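# sum_squared accumulates 1^2 + 2^2 + ... + 100^2 and squared_sum accumulates
# 1 + 2 + ... + 100; the answer is (1 + ... + 100)^2 - (1^2 + ... + 100^2).
# The closed forms n(n+1)/2 = 5050 and n(n+1)(2n+1)/6 = 338350 give
# 5050^2 - 338350 = 25164150 as a cross-check.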
for i in range(1, 101):
|
squared_sum += i
print(f"Answer: {squared_sum**2 - sum_squared}")
|
sum_squared += i**2
|
im_to_mat.py
|
import sys
import os
import tensorflow as tf
import numpy as np
import imageio
import pydicom
from PIL import Image
import weights
#takes the root folder path and returns a list of pictures
def
|
(path):
pic_list = []
full_path_list = []
for dirName, subdirList, fileList in os.walk(path):
# print('test')
        for filename in fileList:
if ".png" in filename.lower() or ".jpg" in filename.lower():
full_path_list.append(os.path.join(dirName,filename))
for im in full_path_list:
pic_list.append(imageio.imread(im))
return pic_list
#takes a list of pictures and returns a list of matrices
def flip_to_mat(images):
matrix_list = []
for im in images:
arr = np.array(im)
arr = arr.reshape(1, -1)
matrix_list.append(arr)
return matrix_list
if __name__ == "__main__":
    flip_to_mat(collect_pictures(sys.argv[1]))
|
collect_pictures
|
fscommon.go
|
package fscommon
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
// FsyncFile fsyncs path contents and the parent directory contents.
func FsyncFile(path string) error {
if err := fsync(path); err != nil {
_ = os.RemoveAll(path)
return fmt.Errorf("cannot fsync file %q: %s", path, err)
}
dir := filepath.Dir(path)
if err := fsync(dir); err != nil {
return fmt.Errorf("cannot fsync dir %q: %s", dir, err)
}
return nil
}
// FsyncDir fsyncs dir contents.
func FsyncDir(dir string) error {
return fsync(dir)
}
func fsync(path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
if err := f.Sync(); err != nil {
_ = f.Close()
return err
}
return f.Close()
}
// AppendFiles appends all the files from dir to dst.
//
// All the appended files will have dir prefix.
func AppendFiles(dst []string, dir string) ([]string, error) {
d, err := os.Open(dir)
if err != nil {
return nil, fmt.Errorf("cannot open %q: %s", dir, err)
}
dst, err = appendFilesInternal(dst, d)
if err1 := d.Close(); err1 != nil {
err = err1
}
return dst, err
}
func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
dir := d.Name()
dfi, err := d.Stat()
if err != nil {
return nil, fmt.Errorf("cannot stat %q: %s", dir, err)
}
if !dfi.IsDir() {
return nil, fmt.Errorf("%q isn't a directory", dir)
}
fis, err := d.Readdir(-1)
if err != nil {
return nil, fmt.Errorf("cannot read directory contents in %q: %s", dir, err)
}
for _, fi := range fis {
name := fi.Name()
if name == "." || name == ".." {
continue
}
if name == "flock.lock" {
			// Do not take into account flock.lock files, since they are used
			// to prevent concurrent access.
continue
}
path := filepath.Join(dir, name)
if fi.IsDir() {
// Process directory
dst, err = AppendFiles(dst, path)
if err != nil {
return nil, fmt.Errorf("cannot list %q: %s", path, err)
}
continue
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
// Process file
dst = append(dst, path)
continue
}
pathOrig := path
again:
// Process symlink
pathReal, err := filepath.EvalSymlinks(pathOrig)
if err != nil {
if os.IsNotExist(err) || strings.Contains(err.Error(), "no such file or directory") {
// Skip symlink that points to nowhere.
continue
}
return nil, fmt.Errorf("cannot resolve symlink %q: %s", pathOrig, err)
}
sfi, err := os.Stat(pathReal)
if err != nil {
return nil, fmt.Errorf("cannot stat %q from symlink %q: %s", pathReal, path, err)
}
if sfi.IsDir() {
// Symlink points to directory
dstNew, err := AppendFiles(dst, pathReal)
if err != nil {
return nil, fmt.Errorf("cannot list files at %q from symlink %q: %s", pathReal, path, err)
}
pathReal += string(os.PathSeparator)
for i := len(dst); i < len(dstNew); i++ {
x := dstNew[i]
if !strings.HasPrefix(x, pathReal) {
return nil, fmt.Errorf("unexpected prefix for path %q; want %q", x, pathReal)
}
dstNew[i] = filepath.Join(path, x[len(pathReal):])
}
dst = dstNew
continue
}
if sfi.Mode()&os.ModeSymlink != os.ModeSymlink {
// Symlink points to file
dst = append(dst, path)
continue
}
// Symlink points to symlink. Process it again.
pathOrig = pathReal
goto again
}
return dst, nil
}
// RemoveEmptyDirs recursively removes empty directories under the given dir.
func RemoveEmptyDirs(dir string) error {
_, err := removeEmptyDirs(dir)
return err
}
func removeEmptyDirs(dir string) (bool, error) {
d, err := os.Open(dir)
if err != nil {
if os.IsNotExist(err) {
return true, nil
}
return false, err
}
ok, err := removeEmptyDirsInternal(d)
if err1 := d.Close(); err1 != nil {
err = err1
}
if err != nil {
return false, err
}
return ok, nil
}
func removeEmptyDirsInternal(d *os.File) (bool, error) {
dir := d.Name()
dfi, err := d.Stat()
if err != nil {
return false, fmt.Errorf("cannot stat %q: %s", dir, err)
}
if !dfi.IsDir() {
return false, fmt.Errorf("%q isn't a directory", dir)
}
fis, err := d.Readdir(-1)
if err != nil {
return false, fmt.Errorf("cannot read directory contents in %q: %s", dir, err)
}
dirEntries := 0
hasFlock := false
for _, fi := range fis {
name := fi.Name()
if name == "." || name == ".." {
continue
}
path := filepath.Join(dir, name)
if fi.IsDir() {
// Process directory
ok, err := removeEmptyDirs(path)
if err != nil {
return false, fmt.Errorf("cannot list %q: %s", path, err)
}
if !ok {
dirEntries++
}
continue
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
if name == "flock.lock" {
hasFlock = true
continue
}
// Skip plain files.
dirEntries++
continue
}
pathOrig := path
again:
// Process symlink
pathReal, err := filepath.EvalSymlinks(pathOrig)
if err != nil {
if os.IsNotExist(err) || strings.Contains(err.Error(), "no such file or directory") {
				// Remove symlink that points to nowhere.
logger.Infof("removing broken symlink %q", pathOrig)
if err := os.Remove(pathOrig); err != nil {
return false, fmt.Errorf("cannot remove %q: %s", pathOrig, err)
}
continue
}
return false, fmt.Errorf("cannot resolve symlink %q: %s", pathOrig, err)
}
sfi, err := os.Stat(pathReal)
if err != nil {
return false, fmt.Errorf("cannot stat %q from symlink %q: %s", pathReal, path, err)
}
if sfi.IsDir() {
// Symlink points to directory
ok, err := removeEmptyDirs(pathReal)
if err != nil {
return false, fmt.Errorf("cannot list files at %q from symlink %q: %s", pathReal, path, err)
|
dirEntries++
} else {
// Remove the symlink
logger.Infof("removing symlink that points to empty dir %q", pathOrig)
if err := os.Remove(pathOrig); err != nil {
return false, fmt.Errorf("cannot remove %q: %s", pathOrig, err)
}
}
continue
}
if sfi.Mode()&os.ModeSymlink != os.ModeSymlink {
// Symlink points to file. Skip it.
dirEntries++
continue
}
// Symlink points to symlink. Process it again.
pathOrig = pathReal
goto again
}
if dirEntries > 0 {
return false, nil
}
logger.Infof("removing empty dir %q", dir)
if hasFlock {
flockFilepath := filepath.Join(dir, "flock.lock")
if err := os.Remove(flockFilepath); err != nil {
return false, fmt.Errorf("cannot remove %q: %s", flockFilepath, err)
}
}
if err := os.Remove(dir); err != nil {
return false, fmt.Errorf("cannot remove %q: %s", dir, err)
}
return true, nil
}
|
}
if !ok {
|
model_default_namespace_entry.go
|
* Bosch IoT Things HTTP API
*
 * Bosch IoT Things enables applications to manage digital twins of IoT device assets in a simple, convenient, robust, and secure way. These descriptions focus on the JSON-based, REST-like **HTTP API 2** of the Bosch IoT Things service. Find details in our [documentation](https://docs.bosch-iot-suite.com/things/). The Bosch IoT Things HTTP API uses response status codes (see [RFC 7231](https://tools.ietf.org/html/rfc7231#section-6)) to indicate whether a specific request has been completed successfully or not. However, the descriptions we provide in addition to the status code (e.g. in our API docs, or error codes like \"solutions:transaction.count.exceeded\") might change without advance notice. These are not to be considered as official API, and must therefore not be relied upon in your applications or tests.
*
* API version: 2
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package iotthings
// DefaultNamespaceEntry A default namespace entry.
type DefaultNamespaceEntry struct {
// Describes whether the namespace is the default for the Solution
Default bool `json:"default"`
// Current state of the namespace (read-only).
State string `json:"state,omitempty"`
}
|
/*
|
|
graphql.js
|
const path = require("path")
const { ApolloServer } = require("apollo-server-lambda")
const { importSchema } = require("graphql-import")
const { makeExecutableSchema } = require("graphql-tools")
const { resolvers } = require("./resolvers")
|
const schema = makeExecutableSchema({ typeDefs, resolvers })
const server = new ApolloServer({
schema,
resolvers,
introspection: true,
playground: true,
})
exports.handler = server.createHandler()
|
const typeDefs = importSchema(path.join(__dirname, "schema.graphql"))
|
files.py
|
import os
from glob import glob
def
|
(files):
latest = 0
for file in files:
src = file['src']
if os.path.isdir(src):
date = get_latest_file_change(list({'src': x} for x in glob(os.path.join(src, '*'))))
else:
date = os.path.getmtime(src)
if date > latest:
latest = date
return int(latest)
|
get_latest_file_change
|
data-pipline.py
|
# import packages
import sys
def load_data(data_file):
# read in file
# clean data
# load to database
# define features and label arrays
return X, y
def build_model():
# text processing and model pipeline
# define parameters for GridSearchCV
# create gridsearch object and return as final model pipeline
return model_pipeline
def train(X, y, model):
# train test split
# fit model
# output model test results
|
def export_model(model):
    # Export model as a pickle file
    pass
def run_pipeline(data_file):
X, y = load_data(data_file) # run ETL pipeline
model = build_model() # build model pipeline
model = train(X, y, model) # train model pipeline
export_model(model) # save model
if __name__ == '__main__':
data_file = sys.argv[1] # get filename of dataset
run_pipeline(data_file) # run data pipeline
|
return model
|
metrics.py
|
import torch
import torch.nn as nn
import numpy as np
import math
import scipy.spatial
import scipy.ndimage.morphology
"""
True Positive (TP): a positive sample predicted as positive
True Negative (TN): a negative sample predicted as negative
False Positive (FP): a negative sample predicted as positive
False Negative (FN): a positive sample predicted as negative
"""
def metrics(predict, label, out_class):
"""Calculate the required metrics
pred = label = [BS, class_num, H, W]
"""
IOU_list = []
Dice_list = []
false_positive_rate_list = []
false_negative_rate_list = []
acc = []
for i in range(1, out_class):
N = label.size(0)
# indices = []
        # # Filter out all-zero labels within the batch; compute the metrics only for samples that have labels
# for j in range(N):
# gt_true = torch.sum(label[j, i, :, :])
# if gt_true:
# indice.append(j)
#
# if indices:
Dice_list.append(diceCoeffv2(predict[:, i, :, :], label[:, i, :, :]))
IOU_list.append(IOU(predict[:, i, :, :], label[:, i, :, :]))
FP_FN_rate_list = FP_FN_rate(predict[:, i, :, :], label[:, i, :, :])
false_positive_rate_list.append(FP_FN_rate_list[0])
false_negative_rate_list.append(FP_FN_rate_list[1])
# accu = pixel_accuracy(predict[indices, i, :, :], label[indices, i, :, :])
# if accu > 0.9:
# print(f'slice id:{i}, acc:{accu}')
acc.append(pixel_accuracy(predict[:, i, :, :], label[:, i, :, :]))
# return mean(IOU_list), mean(Dice_list), mean(acc), mean(false_positive_rate_list), mean(false_negative_rate_list)
return mean(IOU_list), Dice_list, mean(acc), mean(false_positive_rate_list), mean(false_negative_rate_list)
def mean(list):
"""计算平均值"""
if not len(list):
return 0
return sum(list) / len(list)
def mean_class(list):
"""分别计算每个class平均值,返回list"""
res = []
for i in list:
if not len(i):
print('Warning class missing!')
res.append(0)
else:
res.append(mean(i).item())
return res
def batch_pix_accuracy(predict, target):
"""Batch Pixel Accuracy
Args:
predict: input 4D tensor
target: label 3D tensor
"""
_, predict = torch.max(predict, 1)
predict = predict.cpu().numpy() + 1
target = target.cpu().numpy() + 1
pixel_labeled = np.sum(target > 0)
pixel_correct = np.sum((predict == target) * (target > 0))
assert pixel_correct <= pixel_labeled, \
"Correct area should be smaller than Labeled"
return pixel_correct, pixel_labeled
def batch_intersection_union(predict, target, nclass):
"""Batch Intersection of Union
Args:
predict: input 4D tensor
target: label 3D tensor
nclass: number of categories (int)
"""
_, predict = torch.max(predict, 1)
mini = 1
maxi = nclass
nbins = nclass
predict = predict.cpu().numpy() + 1
target = target.cpu().numpy() + 1
predict = predict * (target > 0).astype(predict.dtype)
intersection = predict * (predict == target)
# areas of intersection and union
area_inter, _ = np.histogram(intersection, bins=nbins, range=(mini, maxi))
area_pred, _ = np.histogram(predict, bins=nbins, range=(mini, maxi))
area_lab, _ = np.histogram(target, bins=nbins, range=(mini, maxi))
area_union = area_pred + area_lab - area_inter
assert (area_inter <= area_union).all(), \
"Intersection area should be smaller than Union area"
return area_inter, area_union
def intersection_and_union(im_pred, im_lab, num_class):
im_pred = np.asarray(im_pred)
im_lab = np.asarray(im_lab)
# Remove classes from unlabeled pixels in gt image.
im_pred = im_pred * (im_lab > 0)
# Compute area intersection:
intersection = im_pred * (im_pred == im_lab)
area_inter, _ = np.histogram(intersection, bins=num_class - 1,
range=(1, num_class - 1))
# Compute area union:
area_pred, _ = np.histogram(im_pred, bins=num_class - 1,
range=(1, num_class - 1))
area_lab, _ = np.histogram(im_lab, bins=num_class - 1,
range=(1, num_class - 1))
area_union = area_pred + area_lab - area_inter
return area_inter, area_union
def diceCoeff(pred, gt, smooth=1e-5, ):
r""" computational formula:
        dice = (2 * |pred ∩ gt|) / (|pred| + |gt|)
        |pred|: the sum of the elements in pred
"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
intersection = (pred_flat * gt_flat).sum(1)
unionset = pred_flat.sum(1) + gt_flat.sum(1)
score = (2 * intersection + smooth) / (unionset + smooth)
return score.sum() / N
def diceFlat(pred, gt, smooth=1e-5):
intersection = ((pred * gt).sum()).item()
unionset = (pred.sum() + gt.sum()).item()
score = (2 * intersection + smooth) / (unionset +
|
return score
def diceCoeffv2(pred, gt, eps=1e-5):
r""" computational formula:
dice = (2 * tp) / (2 * tp + fp + fn)
"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum(gt_flat * pred_flat, dim=1)
fp = torch.sum(pred_flat, dim=1) - tp
fn = torch.sum(gt_flat, dim=1) - tp
score = (2 * tp + eps) / (2 * tp + fp + fn + eps)
return score.sum() / N
def IOU(pred, gt, eps=1e-5):
r""" computational formula:
IOU = pred ∩ gt / pred ∪ gt
IOU = tp / (tp + fp + fn)
"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)
fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)
tn = torch.sum((pred_flat == 0) * (gt_flat == 0), dim=1)
fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)
score = (tp + eps) / (tp + fp + fn + eps)
return score.sum() / N
def FP_FN_rate(pred, gt, eps=1e-5):
r"""computational formula:
False_Positive_rate = fp / (fp + tn)
False_Negtive_rate = fn / (fn + tp)
"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)
fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)
tn = torch.sum((pred_flat == 0) * (gt_flat == 0), dim=1)
fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)
false_positive_rate = fp / (fp + tn + eps)
false_negtive_rate = fn / (fn + tp + eps)
return false_positive_rate.sum() / N, false_negtive_rate.sum() / N
def pixel_accuracy(pred, gt, eps=1e-5):
"""TP / (TP + FN)"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)
fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)
score = (tp.float() + eps) / ((tp + fn).float() + eps)
# if score < 0.01:
# print(
# f'score:{score.item()}, gt:{torch.sum(gt_flat, dim=1).item()}, pred:{torch.sum(pred_flat, dim=1).item()}, tp:{tp.item()}, fn:{fn.item()}')
return score.sum() / N
def diceCoeffv3(pred, gt, eps=1e-5):
r""" computational formula:
dice = (2 * tp) / (2 * tp + fp + fn)
"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)
fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)
fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)
    # Convert to float so that division between long tensors does not truncate to 0
score = (2 * tp + eps).float() / (2 * tp + fp + fn + eps).float()
return score.sum() / N
def jaccard(pred, gt, eps=1e-5):
"""TP / (TP + FP + FN)"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0))
fp = torch.sum((pred_flat != 0) * (gt_flat == 0))
fn = torch.sum((pred_flat == 0) * (gt_flat != 0))
score = (tp.float() + eps) / ((tp + fp + fn).float() + eps)
return score.sum() / N
def jaccardFlat(pred, gt, eps=1e-5):
pred_flat = pred.squeeze()
gt_flat = gt.squeeze()
tp = torch.sum((pred_flat != 0) * (gt_flat != 0))
fp = torch.sum((pred_flat != 0) * (gt_flat == 0))
fn = torch.sum((pred_flat == 0) * (gt_flat != 0))
score = (tp.float() + eps) / ((tp + fp + fn).float() + eps)
return score
def jaccardv2(pred, gt, eps=1e-5):
"""TP / (TP + FP + FN)"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum(gt_flat * pred_flat, dim=1)
fp = torch.sum(pred_flat, dim=1) - tp
fn = torch.sum(gt_flat, dim=1) - tp
score = (tp + eps).float() / (tp + fp + fn + eps).float()
return score.sum() / N
def tversky(pred, gt, eps=1e-5, alpha=0.7):
"""TP / (TP + (1-alpha) * FP + alpha * FN)"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum(gt_flat * pred_flat, dim=1)
fp = torch.sum(pred_flat, dim=1) - tp
fn = torch.sum(gt_flat, dim=1) - tp
score = (tp + eps) / (tp + (1 - alpha) * fp + alpha * fn + eps)
return score.sum() / N
def accuracy(pred, gt, eps=1e-5):
"""(TP + TN) / (TP + FP + FN + TN)"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)
fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)
tn = torch.sum((pred_flat == 0) * (gt_flat == 0), dim=1)
fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)
score = ((tp + tn).float() + eps) / ((tp + fp + tn + fn).float() + eps)
return score.sum() / N
def precision(pred, gt, eps=1e-5):
"""TP / (TP + FP)"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0))
fp = torch.sum((pred_flat != 0) * (gt_flat == 0))
score = (tp.float() + eps) / ((tp + fp).float() + eps)
return score.sum() / N
def specificity(pred, gt, eps=1e-5):
"""TN / (TN + FP)"""
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
fp = torch.sum((pred_flat != 0) * (gt_flat == 0))
tn = torch.sum((pred_flat == 0) * (gt_flat == 0))
score = (tn.float() + eps) / ((fp + tn).float() + eps)
return score.sum() / N
if __name__ == '__main__':
# shape = torch.Size([2, 3, 4, 4])
    # simulate batch_size = 2
    '''
    1 0 0 = bladder
    0 1 0 = tumor
    0 0 1 = background
'''
pred = torch.Tensor([[
[[0, 1, 0, 0],
[1, 0, 0, 1],
[1, 0, 0, 1],
[0, 1, 1, 0]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]],
[[1, 0, 1, 1],
[0, 1, 1, 0],
[0, 0, 0, 0],
[1, 0, 0, 1]]]
])
gt = torch.Tensor([[
[[0, 1, 1, 0],
[1, 0, 0, 1],
[1, 0, 0, 1],
[0, 1, 1, 0]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]],
[[1, 0, 0, 1],
[0, 1, 1, 0],
[0, 0, 0, 0],
[1, 0, 0, 1]]]
])
dice1 = diceCoeff(pred[:, 0:1, :], gt[:, 0:1, :])
dice2 = jaccard(pred[:, 0:1, :], gt[:, 0:1, :])
dice3 = diceCoeffv3(pred[:, 0:1, :], gt[:, 0:1, :])
print(dice1, dice2, dice3)
|
smooth)
|
get_topo_view.validator.pb.go
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: get_topo_view.proto
package topo_view
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
_ "github.com/mwitkow/go-proto-validators"
github_com_mwitkow_go_proto_validators "github.com/mwitkow/go-proto-validators"
_ "github.com/easyopsapis/easyops-api-go/protorepo-models/easyops/model/topology"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
func (this *GetTopoViewRequest) Validate() error {
return nil
}
func (this *GetTopoViewResponseWrapper) Validate() error {
if this.Data != nil {
|
return nil
}
|
if err := github_com_mwitkow_go_proto_validators.CallValidatorIfExists(this.Data); err != nil {
return github_com_mwitkow_go_proto_validators.FieldError("Data", err)
}
}
|
boletim.controller.ts
|
import { Controller, Post } from '@nestjs/common';
import { BoletimService } from './boletim.service';
@Controller('boletim')
export class BoletimController {
constructor(private boletim: BoletimService) {}
@Post('agenda')
async atualizaApiAgenda() {
return await this.boletim.agenda();
|
}
}
|
|
color.go
|
package hapitypes
|
DeviceId string
Color RGB
}
func NewColorMsg(deviceId string, color RGB) *ColorMsg {
return &ColorMsg{
DeviceId: deviceId,
Color: color,
}
}
func (e *ColorMsg) InboundEventType() string {
return "ColorMsg"
}
func (e *ColorMsg) OutboundEventType() string {
return "ColorMsg"
}
func (e *ColorMsg) RedirectInbound(toDeviceId string) InboundEvent {
return NewColorMsg(toDeviceId, e.Color)
}
|
type ColorMsg struct {
|
typeset.js
|
import { Paragraph } from './paragraph';
import { adjustSize } from '../../../util';
var defaultFontMetrics = {
lineHeight: 0,
descent: 0,
};
// Each text model stored in the core has a corresponding typeset.
// When a model is changed, the core calls 'update()'.
//
// Paragraphs are separated with the character '\n'. Currently each paragraph is one line.
//
// A typeset must provide text fragments and cursor positions.
//
// A fragment consists of:
// - two alpha textures: one for the text per se and one for the stroke (outline); they should be bound when requested;
// - position of the text fragment assuming that the text origin is in (0, 0).
//
// For the text containing N characters the typeset must produce N + 1 cursor positions. Each of them is a point with two values x and y.
// Line height and descent are used to render the cursor. Cursor positions for subsequent lines must have y coordinates that differ
// exactly in line height.
var Typeset = /** @class */ (function () {
function Typeset(config) {
this.config = config;
this.isContextLost = false;
this.fontMetrics = defaultFontMetrics;
// We store paragraphs of text and update them when necessary.
// Fragments are owned by paragraphs, paragraphs are responsible for their lifetime,
// here we store fragments only to implement Core.TypesetInterop conveniently.
this.paragraphs = [];
this.fragments = [];
}
Typeset.prototype.unload = function () {
// might be called multiple times
this.destroy();
};
Typeset.prototype.contextLost = function () {
this.isContextLost = true;
this.destroy();
|
this.isContextLost = false;
};
// Updates the typeset with the new text. Passes the following arguments:
// text - the text to typeset
// textLength - the length of the text line (in UTF-32 code units)
// direction - text direction: 'ltr' or 'rtl'
// fontSize - font size in pixels
// cursorArray - array to be filled in with cursor data, the memory is pre-allocated in Emscripten heap
// for 2 * (textLength + 1) 32-bit integers (x and y coordinates for cursor positions)
//
// Returns true if the update was successful
// If the result is false, the core will not call the other functions from Core.TypesetInterop.
Typeset.prototype.update = function (text, textLength, direction, fontSize, cursorArray) {
if (this.isContextLost) {
// sanity check because we manipulate textures during update
return false;
}
this.fontMetrics = this.config.fontInfo.getFontMetrics(fontSize);
return (this.updateParagraphs(text, direction, fontSize) &&
this.collectParagraphData(cursorArray, textLength + 1));
};
// After update is completed successfully, the following functions are available:
Typeset.prototype.getFragmentCount = function () {
return this.fragments.length;
};
Typeset.prototype.bindNormal = function (fragmentIndex) {
return this.bindFragmentTexture(fragmentIndex, function (fragment) {
return fragment.bindNormal();
});
};
Typeset.prototype.bindStroke = function (fragmentIndex) {
return this.bindFragmentTexture(fragmentIndex, function (fragment) {
return fragment.bindStroke();
});
};
Typeset.prototype.getXBase = function (fragmentIndex) {
return this.getFragmentCoordinate(fragmentIndex, function (pos) { return pos.xbase; });
};
Typeset.prototype.getYBase = function (fragmentIndex) {
return this.getFragmentCoordinate(fragmentIndex, function (pos, yline) { return pos.ybase + yline; });
};
Typeset.prototype.getXOpposite = function (fragmentIndex) {
return this.getFragmentCoordinate(fragmentIndex, function (pos) { return pos.xopposite; });
};
Typeset.prototype.getYOpposite = function (fragmentIndex) {
return this.getFragmentCoordinate(fragmentIndex, function (pos, yline) { return pos.yopposite + yline; });
};
Typeset.prototype.getLineHeight = function () {
return this.fontMetrics.lineHeight;
};
Typeset.prototype.getDescent = function () {
return this.fontMetrics.descent;
};
Typeset.prototype.destroy = function () {
var _this = this;
// We should not release fragments explicitly because fragments are owned by paragraphs and released when we release paragraphs,
// fragments are stored here only for convenience.
this.fragments = [];
this.paragraphs.forEach(function (par) { return par.unload(_this.isContextLost); });
this.paragraphs = [];
};
Typeset.prototype.bindFragmentTexture = function (index, bindMethod) {
return bindMethod(this.fragments[index].fragment);
};
Typeset.prototype.getFragmentCoordinate = function (index, getter) {
var _a = this.fragments[index], fragment = _a.fragment, yline = _a.yline;
return getter(fragment.position, yline);
};
Typeset.prototype.updateParagraphs = function (text, direction, fontSize) {
var _this = this;
// Paragraphs are separated by '\n'. Currently one paragraph contains one line
var paragraphTexts = text.split('\n');
// We don't recreate, we reuse paragraphs.
// So we need the same number of paragraphs as the number of texts that we got in paragraphTexts array.
adjustSize(this.paragraphs, paragraphTexts.length, function () {
return new Paragraph(_this.config);
}, function (paragraph) {
// paragraphs contain textures and must be released explicitly
paragraph.unload(_this.isContextLost);
});
// Update each paragraph. The lengths of this.paragraphs and paragraphTexts are the same
return this.paragraphs.every(function (paragraph, index) {
return paragraph.update(paragraphTexts[index], direction, fontSize);
});
};
Typeset.prototype.collectParagraphData = function (cursorArray, cursorPosCount) {
// Since every paragraph is successfully updated, we need to collect fragments and cursor positions
return (this.collectFragments() &&
this.collectCursorPositions(cursorArray, cursorPosCount));
};
Typeset.prototype.collectFragments = function () {
var _this = this;
// For fragments we will record each fragment and the y coordinate of the line
// (currently one paragraph contains only one line).
this.fragments = []; // we don't own fragments (paragraphs do), so we don't need to delete textures explicitly
var lineHeight = this.fontMetrics.lineHeight;
this.paragraphs.forEach(function (par, parIndex) {
var yline = -parIndex * lineHeight;
par.textFragments.forEach(function (fragment) {
_this.fragments.push({ fragment: fragment, yline: yline });
});
});
return true;
};
Typeset.prototype.collectCursorPositions = function (cursorArray, cursorPosCount) {
// For cheaper interoperation the core preallocates the array in Emscripten memory heap.
// 'cursorArray' is the offset in this 32-bit heap.
// We must populate this array with the values from paragraphs.
//
// This array contains (cursorPosCount * 2) 32-bit numbers. For each cursor position it should store
// firstly x and then y coordinate.
// For example, if we have 3 cursor positions (12, -5), (16, -11), (22, -11) the core expects the array:
// [12, -5, 16, -11, 22, -11]
// which contains 6 elements
var count = 2 * cursorPosCount; // number of elements in the array
var heapBase = this.config.module.HEAP32.buffer;
var array = new Int32Array(heapBase, cursorArray, count);
var index = 0;
var lineHeight = this.fontMetrics.lineHeight;
this.paragraphs.forEach(function (par, parIndex) {
var yline = Math.round(-parIndex * lineHeight);
par.textCursorPositions.forEach(function (x) {
if (index < count - 1) {
                // We must check the limits so that we do not write outside the allocated heap array
array[index] = Math.round(x);
array[index + 1] = yline;
}
index += 2;
});
});
return true;
};
return Typeset;
}());
export { Typeset };
//# sourceMappingURL=typeset.js.map
|
};
Typeset.prototype.contextRestored = function () {
|
index.js
|
import { GetPersonNums, PlayGame } from "./playgame.js";
import GetComRandomNum from "./comnum.js";
export default function BaseballGame() {
this.play = function (computerInputNumbers, userInputNumbers) {
return PlayGame(computerInputNumbers, userInputNumbers);
};
this.play(GetComRandomNum(), GetPersonNums());
}
// export default class BaseballGame {
// play(computerInputNumbers, userInputNumbers) {
|
// }
// }
new BaseballGame();
|
// return "결과 값 String";
|
link-tool.component.spec.ts
|
import { async, ComponentFixture, TestBed } from "@angular/core/testing"
import { LinkToolComponent } from "./link-tool.component"
describe("LinkToolComponent", () => {
let component: LinkToolComponent
let fixture: ComponentFixture<LinkToolComponent>
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [LinkToolComponent],
}).compileComponents()
|
fixture = TestBed.createComponent(LinkToolComponent)
component = fixture.componentInstance
fixture.detectChanges()
})
it("should create", () => {
expect(component).toBeTruthy()
})
})
|
}))
beforeEach(() => {
|