file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k) |
---|---|---|---|
index.ts | import 'reflect-metadata';
import { ApolloServer, Config } from 'apollo-server-express';
import bodyParser from 'body-parser';
import { config } from 'dotenv';
import Express from 'express';
import helmet from 'helmet';
import createSchema from './utils/createSchema';
import { logger } from './utils/globalMethods';
import { createTypeormConn } from './utils/typeORMConn';
class | {
public express: Express.Application;
public apolloConfig: Config;
public apollo: ApolloServer;
constructor() {
config();
this.express = Express();
this.initializeDatabase();
this.initializeMiddlewares();
this.initializeControllers();
this.initializeApollo();
}
private async initializeApollo(): Promise<void> {
const { APP_NAME, ENVIRONMENT, RUN_PLAYGROUND = true } = process.env;
const apolloServerConfig: Config = {
cacheControl: { defaultMaxAge: 30 },
context: ({ req, res }: any) => ({ req, res }),
formatError: (error) => {
const { message, path } = error;
logger.error(
`Message: ${message.toUpperCase()} / On Path: ${JSON.stringify(
path,
)}`,
);
return error;
},
playground: RUN_PLAYGROUND
? { title: APP_NAME, workspaceName: ENVIRONMENT }
: false,
schema: await createSchema(),
};
if (ENVIRONMENT === 'production') {
apolloServerConfig.introspection = true;
}
const apolloServer = new ApolloServer(apolloServerConfig);
apolloServer.applyMiddleware({
app: this.express,
cors: true,
});
}
private initializeMiddlewares(): void {
this.express.use(bodyParser.json());
this.express.use(bodyParser.urlencoded({ extended: true }));
this.express.use(helmet({ contentSecurityPolicy: false }));
}
private async initializeDatabase(): Promise<void> {
try {
await createTypeormConn();
} catch (e) {
logger.error('Database connection error: ' + e.message);
}
}
private initializeControllers(): void {
this.express.get('/', (_, res) => res.json({ message: `Hi!` }));
}
public listen(): void {
const { APP_NAME, PORT = 3333 } = process.env;
logger.debug(`Starting ${APP_NAME} Server`);
this.express.listen(PORT, () => {
logger.debug(`App listening on the port ${PORT}`);
});
}
}
const app = new App();
app.listen();
| App |
pcc_lpit.rs | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::PCC_LPIT {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R { bits: self.register.get() }
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = "Possible values of the field `PCS`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PCSR {
#[doc = "Clock is off."]
_000,
#[doc = "Clock option 1"]
_001,
#[doc = "Clock option 2"]
_010,
#[doc = "Clock option 3"]
_011,
#[doc = "Clock option 4"]
_100,
#[doc = "Clock option 5"]
_101,
#[doc = "Clock option 6"]
_110,
#[doc = "Clock option 7"]
_111,
}
impl PCSR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
PCSR::_000 => 0,
PCSR::_001 => 1,
PCSR::_010 => 2,
PCSR::_011 => 3,
PCSR::_100 => 4,
PCSR::_101 => 5,
PCSR::_110 => 6,
PCSR::_111 => 7,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> PCSR |
#[doc = "Checks if the value of the field is `_000`"]
#[inline]
pub fn is_000(&self) -> bool {
*self == PCSR::_000
}
#[doc = "Checks if the value of the field is `_001`"]
#[inline]
pub fn is_001(&self) -> bool {
*self == PCSR::_001
}
#[doc = "Checks if the value of the field is `_010`"]
#[inline]
pub fn is_010(&self) -> bool {
*self == PCSR::_010
}
#[doc = "Checks if the value of the field is `_011`"]
#[inline]
pub fn is_011(&self) -> bool {
*self == PCSR::_011
}
#[doc = "Checks if the value of the field is `_100`"]
#[inline]
pub fn is_100(&self) -> bool {
*self == PCSR::_100
}
#[doc = "Checks if the value of the field is `_101`"]
#[inline]
pub fn is_101(&self) -> bool {
*self == PCSR::_101
}
#[doc = "Checks if the value of the field is `_110`"]
#[inline]
pub fn is_110(&self) -> bool {
*self == PCSR::_110
}
#[doc = "Checks if the value of the field is `_111`"]
#[inline]
pub fn is_111(&self) -> bool {
*self == PCSR::_111
}
}
#[doc = "Possible values of the field `CGC`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CGCR {
#[doc = "Clock disabled"]
_0,
#[doc = "Clock enabled. The current clock selection and divider options are locked."]
_1,
}
impl CGCR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
CGCR::_0 => false,
CGCR::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> CGCR {
match value {
false => CGCR::_0,
true => CGCR::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == CGCR::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == CGCR::_1
}
}
#[doc = "Possible values of the field `PR`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PRR {
#[doc = "Peripheral is not present."]
_0,
#[doc = "Peripheral is present."]
_1,
}
impl PRR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PRR::_0 => false,
PRR::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PRR {
match value {
false => PRR::_0,
true => PRR::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == PRR::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == PRR::_1
}
}
#[doc = "Values that can be written to the field `PCS`"]
pub enum PCSW {
#[doc = "Clock is off."]
_000,
#[doc = "Clock option 1"]
_001,
#[doc = "Clock option 2"]
_010,
#[doc = "Clock option 3"]
_011,
#[doc = "Clock option 4"]
_100,
#[doc = "Clock option 5"]
_101,
#[doc = "Clock option 6"]
_110,
#[doc = "Clock option 7"]
_111,
}
impl PCSW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
PCSW::_000 => 0,
PCSW::_001 => 1,
PCSW::_010 => 2,
PCSW::_011 => 3,
PCSW::_100 => 4,
PCSW::_101 => 5,
PCSW::_110 => 6,
PCSW::_111 => 7,
}
}
}
#[doc = r" Proxy"]
pub struct _PCSW<'a> {
w: &'a mut W,
}
impl<'a> _PCSW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PCSW) -> &'a mut W {
{
self.bits(variant._bits())
}
}
#[doc = "Clock is off."]
#[inline]
pub fn _000(self) -> &'a mut W {
self.variant(PCSW::_000)
}
#[doc = "Clock option 1"]
#[inline]
pub fn _001(self) -> &'a mut W {
self.variant(PCSW::_001)
}
#[doc = "Clock option 2"]
#[inline]
pub fn _010(self) -> &'a mut W {
self.variant(PCSW::_010)
}
#[doc = "Clock option 3"]
#[inline]
pub fn _011(self) -> &'a mut W {
self.variant(PCSW::_011)
}
#[doc = "Clock option 4"]
#[inline]
pub fn _100(self) -> &'a mut W {
self.variant(PCSW::_100)
}
#[doc = "Clock option 5"]
#[inline]
pub fn _101(self) -> &'a mut W {
self.variant(PCSW::_101)
}
#[doc = "Clock option 6"]
#[inline]
pub fn _110(self) -> &'a mut W {
self.variant(PCSW::_110)
}
#[doc = "Clock option 7"]
#[inline]
pub fn _111(self) -> &'a mut W {
self.variant(PCSW::_111)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 7;
const OFFSET: u8 = 24;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `CGC`"]
pub enum CGCW {
#[doc = "Clock disabled"]
_0,
#[doc = "Clock enabled. The current clock selection and divider options are locked."]
_1,
}
impl CGCW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
CGCW::_0 => false,
CGCW::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _CGCW<'a> {
w: &'a mut W,
}
impl<'a> _CGCW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: CGCW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Clock disabled"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(CGCW::_0)
}
#[doc = "Clock enabled. The current clock selection and divider options are locked."]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(CGCW::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 30;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 24:26 - Peripheral Clock Source Select"]
#[inline]
pub fn pcs(&self) -> PCSR {
PCSR::_from({
const MASK: u8 = 7;
const OFFSET: u8 = 24;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
#[doc = "Bit 30 - Clock Gate Control"]
#[inline]
pub fn cgc(&self) -> CGCR {
CGCR::_from({
const MASK: bool = true;
const OFFSET: u8 = 30;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 31 - Present"]
#[inline]
pub fn pr(&self) -> PRR {
PRR::_from({
const MASK: bool = true;
const OFFSET: u8 = 31;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 2147483648 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 24:26 - Peripheral Clock Source Select"]
#[inline]
pub fn pcs(&mut self) -> _PCSW {
_PCSW { w: self }
}
#[doc = "Bit 30 - Clock Gate Control"]
#[inline]
pub fn cgc(&mut self) -> _CGCW {
_CGCW { w: self }
}
}
| {
match value {
0 => PCSR::_000,
1 => PCSR::_001,
2 => PCSR::_010,
3 => PCSR::_011,
4 => PCSR::_100,
5 => PCSR::_101,
6 => PCSR::_110,
7 => PCSR::_111,
_ => unreachable!(),
}
} |
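The pcc_lpit.rs row above is svd2rust-style generated register access code. A minimal sketch of how that read/modify/write API is typically driven is shown below; the `enable_lpit_clock` function name and the way the register handle is obtained are assumptions for illustration, not part of the generated crate.
// Hypothetical caller for the generated PCC_LPIT API shown above.
// `pcc_lpit` is assumed to be a reference to the register exposed by the
// device crate (e.g. obtained through Peripherals::take()).
fn enable_lpit_clock(pcc_lpit: &PCC_LPIT) {
    // Plain read: bail out if the peripheral is not present (PR bit clear).
    if pcc_lpit.read().pr().is_0() {
        return;
    }
    // Read-modify-write: select clock option 1 (PCS = 001) and set the
    // clock gate (CGC), leaving every other bit of the register unchanged.
    pcc_lpit.modify(|_, w| w.pcs()._001().cgc().set_bit());
}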
index.d.ts | // DO NOT EDIT! This file is autogenerated from scripts/data/index.d.ts.template.
export = ohm;
declare namespace ohm {
const ohmGrammar: Grammar;
/**
* Instantiate the Grammar defined by source. If specified, namespace is
* the Namespace to use when resolving external references in the grammar.
*/
function grammar(source: string, namespace?: Namespace): Grammar;
/**
* grammarFromScriptElement was removed in Ohm v16.0. See
* https://ohmjs.org/d/gfs for more info.
* @deprecated
*/
function grammarFromScriptElement(node?: unknown, namespace?: Namespace): Grammar;
/**
* Create a new Namespace containing Grammar instances for all of the
* grammars defined in source.
* If namespace is specified, it will be the prototype of the new
* Namespace.
*/
function grammars(source: string, namespace?: Namespace): Namespace;
/**
* grammarsFromScriptElements was removed in Ohm v16.0. See
* https://ohmjs.org/d/gfs for more info.
* @deprecated
*/
function grammarsFromScriptElements(nodeList?: unknown, namespace?: Namespace): Namespace;
/**
* Create a new namespace. If props is specified, all of its properties
* will be copied to the new namespace.
*/
function namespace(props?: Object): Namespace;
/**
* Create a new namespace which inherits from namespace. If props is
* specified, all of its properties will be copied to the new namespace.
*/
function extendNamespace(namespace: Namespace, props?: Object): Namespace;
/**
* A Namespace is a dictionary of Grammars
*/
interface Namespace {
[index: string]: Grammar;
}
/**
* An Ohm Grammar.
*/
interface Grammar {
name: string;
superGrammar: Grammar;
rules: {[ruleName: string]: RuleInfo};
/** Return true if the grammar is a built-in grammar, otherwise false. */
isBuiltIn(): boolean;
/**
* Try to match input with this grammar, returning a MatchResult. If
* startRule is given, it specifies the rule on which to start
* matching. By default, the start rule is inherited from the
* supergrammar, or if there is no supergrammar specified, it is the
* first rule in this grammar.
*/
match(input: string, startRule?: string): MatchResult;
/**
* Create a new Matcher object which supports incrementally matching
* this grammar against a changing input string.
*/
matcher(): Matcher;
/**
* Like match() except returns a trace object whose toString() returns
* a summary of each parsing step useful for debugging.
*/
trace(input: string, startRule?: string): Object;
/**
* Create a new Semantics object for this Grammar.
*/
createSemantics(): Semantics;
/**
* Create a new Semantics object for this Grammar that inherits all
* of the operations and attributes in superSemantics.
* This Grammar must be a descendant of the grammar associated with
* superSemantics.
*/
extendSemantics(superSemantics: Semantics): Semantics;
}
interface PExpr {}
/**
* Matcher objects are used to incrementally match a changing input
* against a Grammar, e.g. in an editor or IDE.
*/
interface Matcher {
/**
* Return the current input string.
*/
getInput(): string;
/**
* Set the input string to `str`.
*/
setInput(str: string): void;
/**
* Edit the current input string, replacing the characters between
* `startIdx` and `endIdx` with `str`.
*/
replaceInputRange(startIdx: number, endIdx: number, str: string): Matcher;
/**
* Like Grammar#match, but operates incrementally.
*/
match(optStartRule?: string): MatchResult;
/**
* Like Grammar#trace, but operates incrementally.
*/
trace(optStartRule?: string): Object;
}
/**
* Result of Grammar#match
*/
interface MatchResult {
/**
* True iff match succeeded
*/
succeeded(): boolean;
/**
* True iff match did not succeed
*/
failed(): boolean;
/**
* If the match failed, contains an error message indicating where and
* why the match failed. This message is suitable for end users of a
* language (i.e., people who do not have access to the grammar source).
*/
message?: string;
/**
* If the match failed, contains an abbreviated version of this.message that
* does not include an excerpt from the invalid input.
*/
shortMessage?: string;
}
/**
* A Semantics is a family of operations and/or attributes for a given
* grammar. Each operation/attribute has a unique name within the
* Semantics. A grammar may have any number of Semantics instances
* associated with it -- this means that the clients of a grammar
* (even in the same program) never have to worry about
* operation/attribute name clashes.
*/
interface Semantics {
/**
* Returns a dictionary containing operations and attributes defined by
* this Semantics on the result of a matched grammar. Operations are
* no-arg functions and attributes are properties.
*/
(match: MatchResult): Dict;
/**
* Add a new operation named name to this Semantics, using the
* semantic actions contained in actionDict. It is an error if there
* is already an operation or attribute called name in this semantics.
* Returns this Semantics.
*/
addOperation<T>(name: string, actionDict: ActionDict<T>): Semantics;
/**
* Add a new attribute named name to this Semantics, using the
* semantic actions contained in actionDict. It is an error if there
* is already an operation or attribute called name in this semantics.
* Returns this Semantics.
*/
addAttribute<T>(name: string, actionDict: ActionDict<T>): Semantics;
/**
* Extend the operation named name with the semantic actions contained
* in actionDict. name must be the name of an operation in the super
* semantics.
* Returns this Semantics.
*/
extendOperation<T>(name: string, actionDict: ActionDict<T>): Semantics;
/**
* Extend the attribute named name with the semantic actions contained
* in actionDict. name must be the name of an attribute in the super
* semantics.
* Returns this Semantics.
*/
extendAttribute<T>(name: string, actionDict: ActionDict<T>): Semantics;
}
/**
* A dictionary is indexed by strings.
*/
interface Dict {
[index: string]: any;
}
/**
* An ActionDict is a dictionary of Actions indexed by rule names.
*/
interface ActionDict<T> {
[index: string]: Action<T> | undefined;
_iter?: (this: IterationNode, ...children: Node[]) => T;
_nonterminal?: (this: NonterminalNode, ...children: Node[]) => T;
_terminal?: (this: TerminalNode) => T;
// Built-in rules
alnum?: (this: NonterminalNode, arg0: NonterminalNode) => T;
letter?: (this: NonterminalNode, arg0: NonterminalNode) => T;
digit?: (this: NonterminalNode, arg0: TerminalNode) => T;
hexDigit?: (this: NonterminalNode, arg0: NonterminalNode | TerminalNode) => T;
ListOf?: (this: NonterminalNode, arg0: NonterminalNode) => T;
NonemptyListOf?: (
this: NonterminalNode,
arg0: Node,
arg1: IterationNode,
arg2: IterationNode
) => T;
EmptyListOf?: (this: NonterminalNode) => T;
listOf?: (this: NonterminalNode, arg0: NonterminalNode) => T;
nonemptyListOf?: (
this: NonterminalNode,
arg0: Node,
arg1: IterationNode,
arg2: IterationNode
) => T;
emptyListOf?: (this: NonterminalNode) => T;
applySyntactic?: (this: NonterminalNode, arg0: Node) => T;
}
/**
* An Action is a function from ParseNodes, called with the child nodes
* of the node it is being executed on.
* The current node is passed as a dynamic this, requiring an ES5
* anonymous function with this typed as any.
*/
type Action<T> = (this: Node, ...args: Node[]) => T;
/**
* A node in the parse tree, passed to Action functions.
*/
interface Node {
/**
* Returns the child at index idx.
*/
child(idx: number): Node;
/**
* true if the node is a terminal node, otherwise false.
*/
isTerminal(): boolean;
/**
* true if the node is an iteration node, which corresponds to a
* +, *, or ? expression in the grammar.
*/
isIteration(): boolean;
/**
* An array containing the node's children.
*/
children: Node[];
/**
* The name of the grammar rule that created the node.
*/
ctorName: string;
/**
* Captures the portion of the input that was consumed by the node.
*/
source: Interval;
/**
* Returns the contents of the input stream consumed by this node.
*/
sourceString: string;
/**
* The number of child nodes that the node has.
*/
numChildren: number;
/**
* True if the node corresponds to an optional (?) expression in the grammar.
*/
isOptional(): boolean;
/**
* In addition to the properties defined above, within a given
* semantics, every node also has a method/property corresponding to
* each operation/attribute in the semantics.
* For example, in a semantics that has an operation named 'prettyPrint'
* and an attribute named 'freeVars', every node has a prettyPrint()
* method and a freeVars property.
* NOTE: this means the above node properties cannot be used as
* operation/attribute names.
*/
[index: string]: any;
}
interface IterationNode extends Node {}
interface NonterminalNode extends Node {}
interface TerminalNode extends Node {}
/**
* Interval in input string
*/
interface Interval {
/**
* Input stream of parse
*/
inputStream: any;
/**
* Starting index in input
*/
startIdx: number;
/**
* Ending index in input
*/
endIdx: number;
/**
* Contents of interval
*/
contents: string;
/**
* Returns a new Interval at the start of this one
*/
collapsedLeft(): Interval;
/**
* Returns a new Interval at the end of this one
*/
collapsedRight(): Interval;
| * but with whitespace trimmed from both ends.
*/
trimmed(): Interval;
/**
* Returns a new Interval that covers this Interval and all the
* argument Intervals. The new Interval will start at the lowest start
* index and end at the largest end index.
*/
coverageWith(...intervals: Interval[]): Interval;
/**
* Return a nicely-formatted string describing the start of the Interval
*/
getLineAndColumnMessage(): string;
}
interface RuleInfo {
body: PExpr;
formals: string[];
description: string;
source: Interval;
}
} | /**
* Returns a new Interval which contains the same contents as this one, |
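The index.d.ts row documents the ohm-js API (Grammar, MatchResult, Semantics). A short usage sketch against those declarations follows; the grammar source, the `greeting` rule, and the `who` operation are made-up illustrations, and the package name `ohm-js` is assumed.
import * as ohm from 'ohm-js';

// A tiny grammar; rule names here are illustrative only.
const g = ohm.grammar(`
  Greeting {
    greeting = "hello" " " name
    name = letter+
  }
`);

// Grammar#match returns a MatchResult; succeeded()/failed()/message behave
// as declared above. The first rule (greeting) is the default start rule.
const result = g.match('hello world');
if (result.succeeded()) {
  // Operations are added to a Semantics and invoked on the wrapped result.
  const semantics = g.createSemantics().addOperation<string>('who', {
    greeting(_hello, _space, name) {
      return name.sourceString;
    },
  });
  console.log(semantics(result).who()); // prints "world"
} else {
  console.log(result.message);
}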
main_test.go | package mario
import (
"testing"
)
type In struct {
Int0 int
IntSlice0 []int
}
var testCase = []struct {
In In
Expect []int
}{
// test cases here
{In{4, []int{1, 3, 1, 2}}, []int{1, 2}},
{In{2, []int{2, 1, 2, 1, 2, 1, 2, 1, 2}}, []int{2}},
{In{7, []int{1, 3, 5, 7}}, []int{1, 2, 3, 4, 5, 6, 7}},
{In{3, []int{3, 2, 1, 2, 1, 3, 2, 1, 2, 1, 3, 2, 3, 1}}, []int{1, 3}},
{In{6, []int{4, 5}}, []int{4, 5}},
}
func TestMostVisited(t *testing.T) {
tcs := testCase
for i := range tcs {
if !compareOnIntSlice(mostVisited(tcs[i].In.Int0, tcs[i].In.IntSlice0), tcs[i].Expect) {
t.Errorf("most visited test failed on case: %d\n", i)
}
}
}
func compareOnIntSlice(a, b []int) bool | {
if len(a) != len(b) {
return false
}
isEqual := true
for i := range a {
if a[i] != b[i] {
isEqual = false
break
}
}
return isEqual
} |
|
test_serverless.py | import unittest
from troposphere import Tags, Template
from troposphere.s3 import Filter, Rules, S3Key
from troposphere.serverless import (
Api, DeadLetterQueue, DeploymentPreference, Function, FunctionForPackaging,
LayerVersion, S3Event, S3Location, SimpleTable,
)
class TestServerless(unittest.TestCase):
def test_exactly_one_code(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri=S3Location(
Bucket="mybucket",
Key="mykey",
),
InlineCode="",
)
t = Template()
t.add_resource(serverless_func)
with self.assertRaises(ValueError):
t.to_json()
def test_s3_location(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri=S3Location(
Bucket="mybucket",
Key="mykey",
)
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_tags(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
Tags=Tags({
'Tag1': 'TagValue1',
'Tag2': 'TagValue2'
})
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_DLQ(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
DeadLetterQueue=DeadLetterQueue(
Type='SNS',
TargetArn='arn:aws:sns:us-east-1:000000000000:SampleTopic'
)
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_required_function(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip"
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_optional_auto_publish_alias(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
AutoPublishAlias="alias"
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_optional_deployment_preference(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
AutoPublishAlias="alias",
DeploymentPreference=DeploymentPreference(
Type="AllAtOnce"
)
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_required_api_definitionuri(self):
serverless_api = Api(
"SomeApi",
StageName='test',
DefinitionUri='s3://bucket/swagger.yml',
)
t = Template()
t.add_resource(serverless_api)
t.to_json()
swagger = {
"swagger": "2.0",
"info": {
"title": "swagger test",
},
"paths": {
"/test": {
"get": {
},
},
},
}
def test_required_api_both(self):
serverless_api = Api(
"SomeApi",
StageName='test',
DefinitionUri='s3://bucket/swagger.yml',
DefinitionBody=self.swagger,
)
t = Template()
t.add_resource(serverless_api)
with self.assertRaises(ValueError):
t.to_json()
def test_required_api_definitionbody(self):
serverless_api = Api(
"SomeApi",
StageName='test',
DefinitionBody=self.swagger,
)
t = Template()
t.add_resource(serverless_api)
t.to_json()
def test_api_no_definition(self):
serverless_api = Api(
"SomeApi",
StageName='test',
)
t = Template()
t.add_resource(serverless_api)
t.to_json()
def test_simple_table(self):
serverless_table = SimpleTable(
"SomeTable"
)
t = Template()
t.add_resource(serverless_table)
t.to_json()
def test_layer_version(self):
layer_version = LayerVersion(
"SomeLayer",
ContentUri="someuri",
)
t = Template()
t.add_resource(layer_version)
t.to_json()
layer_version = LayerVersion(
"SomeLayer",
)
t = Template()
t.add_resource(layer_version)
with self.assertRaises(ValueError):
t.to_json()
def test_s3_filter(self):
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6',
Policies='AmazonS3FullAccess',
Events={
'FileUpload': S3Event(
'FileUpload',
Bucket="bucket",
Events=['s3:ObjectCreated:*'],
Filter=Filter(S3Key=S3Key(
Rules=[
Rules(Name="prefix", Value="upload/"),
Rules(Name="suffix", Value=".txt"),
],
))
)
}
)
)
t.to_json()
def test_policy_document(self):
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6', | t.to_json()
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6',
Policies=["AmazonS3FullAccess", "AmazonDynamoDBFullAccess"]
)
)
t.to_json()
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6',
Policies={
"Statement": [{
"Effect": "Allow",
"Action": ["s3:GetObject", "s3:PutObject"],
"Resource": ["arn:aws:s3:::bucket/*"],
}]
},
)
)
t.to_json()
def test_packaging(self):
# test for no CodeUri or InlineCode
t = Template()
t.add_resource(
FunctionForPackaging(
"ProcessorFunction",
Handler='process_file.handler',
Runtime='python3.6',
Policies={
"Statement": [{
"Effect": "Allow",
"Action": ["s3:GetObject", "s3:PutObject"],
"Resource": ["arn:aws:s3:::bucket/*"],
}]
},
)
)
t.to_json()
if __name__ == '__main__':
unittest.main() | Policies="AmazonS3ReadOnly"
)
) |
dag.rs | //Copyright 2020 EinsteinDB Project Authors & WHTCORPS Inc. Licensed under Apache-2.0.
use super::*;
use protobuf::Message;
use ekvproto::interlock::{KeyCone, Request};
use ekvproto::kvrpc_timeshare::Context;
use fidel_timeshare::PrimaryCausetInfo;
use fidel_timeshare::{Aggregation, ExecType, FreeDaemon, IndexScan, Limit, Selection, BlockScan, TopN};
use fidel_timeshare::{ByItem, Expr, ExprType};
use fidel_timeshare::{Soliton, PosetDagRequest};
use milevadb_query_datatype::codec::{datum, Datum};
use edb::interlock::REQ_TYPE_DAG;
use violetabftstore::interlock::::codec::number::NumberEncoder;
pub struct DAGSelect {
pub execs: Vec<FreeDaemon>,
pub cols: Vec<PrimaryCausetInfo>,
pub order_by: Vec<ByItem>,
pub limit: Option<u64>,
pub aggregate: Vec<Expr>,
pub group_by: Vec<Expr>,
pub key_cone: KeyCone,
pub output_offsets: Option<Vec<u32>>,
}
impl DAGSelect {
pub fn from(Block: &Block) -> DAGSelect {
let mut exec = FreeDaemon::default();
exec.set_tp(ExecType::TypeBlockScan);
let mut tbl_scan = BlockScan::default();
let mut Block_info = Block.Block_info();
tbl_scan.set_Block_id(Block_info.get_Block_id());
let PrimaryCausets_info = Block_info.take_PrimaryCausets();
tbl_scan.set_PrimaryCausets(PrimaryCausets_info);
exec.set_tbl_scan(tbl_scan);
DAGSelect {
execs: vec![exec],
cols: Block.PrimaryCausets_info(),
order_by: vec![],
limit: None,
aggregate: vec![],
group_by: vec![],
key_cone: Block.get_record_cone_all(),
output_offsets: None,
}
}
pub fn from_index(Block: &Block, index: &PrimaryCauset) -> DAGSelect {
let idx = index.index;
let mut exec = FreeDaemon::default();
exec.set_tp(ExecType::TypeIndexScan);
let mut scan = IndexScan::default();
let mut index_info = Block.index_info(idx, true);
scan.set_Block_id(index_info.get_Block_id());
scan.set_index_id(idx);
let PrimaryCausets_info = index_info.take_PrimaryCausets();
scan.set_PrimaryCausets(PrimaryCausets_info.clone());
exec.set_idx_scan(scan);
let cone = Block.get_index_cone_all(idx);
DAGSelect {
execs: vec![exec],
cols: PrimaryCausets_info.to_vec(),
order_by: vec![],
limit: None,
aggregate: vec![],
group_by: vec![],
key_cone: cone,
output_offsets: None,
}
}
pub fn limit(mut self, n: u64) -> DAGSelect {
self.limit = Some(n);
self
}
pub fn order_by(mut self, col: &PrimaryCauset, desc: bool) -> DAGSelect {
let col_offset = offset_for_PrimaryCauset(&self.cols, col.id);
let mut item = ByItem::default();
let mut expr = Expr::default();
expr.set_field_type(col.as_field_type());
expr.set_tp(ExprType::PrimaryCausetRef);
expr.mut_val().encode_i64(col_offset).unwrap();
item.set_expr(expr);
item.set_desc(desc);
self.order_by.push(item);
self
}
pub fn count(self, col: &PrimaryCauset) -> DAGSelect {
self.aggr_col(col, ExprType::Count)
}
pub fn aggr_col(mut self, col: &PrimaryCauset, aggr_t: ExprType) -> DAGSelect {
let col_offset = offset_for_PrimaryCauset(&self.cols, col.id);
let mut col_expr = Expr::default();
col_expr.set_field_type(col.as_field_type());
col_expr.set_tp(ExprType::PrimaryCausetRef);
col_expr.mut_val().encode_i64(col_offset).unwrap();
let mut expr = Expr::default();
let mut expr_ft = col.as_field_type();
// Avg will contain two auxiliary PrimaryCausets (sum, count), and the sum should be a `Decimal`
if aggr_t == ExprType::Avg || aggr_t == ExprType::Sum {
expr_ft.set_tp(0xf6); // FieldTypeTp::NewDecimal
}
expr.set_field_type(expr_ft);
expr.set_tp(aggr_t);
expr.mut_children().push(col_expr);
self.aggregate.push(expr);
self
}
pub fn first(self, col: &PrimaryCauset) -> DAGSelect {
self.aggr_col(col, ExprType::First)
}
pub fn sum(self, col: &PrimaryCauset) -> DAGSelect {
self.aggr_col(col, ExprType::Sum)
}
pub fn avg(self, col: &PrimaryCauset) -> DAGSelect {
self.aggr_col(col, ExprType::Avg)
}
pub fn max(self, col: &PrimaryCauset) -> DAGSelect {
self.aggr_col(col, ExprType::Max)
}
pub fn min(self, col: &PrimaryCauset) -> DAGSelect {
self.aggr_col(col, ExprType::Min)
}
pub fn bit_and(self, col: &PrimaryCauset) -> DAGSelect {
self.aggr_col(col, ExprType::AggBitAnd)
}
pub fn bit_or(self, col: &PrimaryCauset) -> DAGSelect {
self.aggr_col(col, ExprType::AggBitOr)
}
pub fn bit_xor(self, col: &PrimaryCauset) -> DAGSelect |
pub fn group_by(mut self, cols: &[&PrimaryCauset]) -> DAGSelect {
for col in cols {
let offset = offset_for_PrimaryCauset(&self.cols, col.id);
let mut expr = Expr::default();
expr.set_field_type(col.as_field_type());
expr.set_tp(ExprType::PrimaryCausetRef);
expr.mut_val().encode_i64(offset).unwrap();
self.group_by.push(expr);
}
self
}
pub fn output_offsets(mut self, output_offsets: Option<Vec<u32>>) -> DAGSelect {
self.output_offsets = output_offsets;
self
}
pub fn where_expr(mut self, expr: Expr) -> DAGSelect {
let mut exec = FreeDaemon::default();
exec.set_tp(ExecType::TypeSelection);
let mut selection = Selection::default();
selection.mut_conditions().push(expr);
exec.set_selection(selection);
self.execs.push(exec);
self
}
pub fn build(self) -> Request {
self.build_with(Context::default(), &[0])
}
pub fn build_with(mut self, ctx: Context, flags: &[u64]) -> Request {
if !self.aggregate.is_empty() || !self.group_by.is_empty() {
let mut exec = FreeDaemon::default();
exec.set_tp(ExecType::TypeAggregation);
let mut aggr = Aggregation::default();
if !self.aggregate.is_empty() {
aggr.set_agg_func(self.aggregate.into());
}
if !self.group_by.is_empty() {
aggr.set_group_by(self.group_by.into());
}
exec.set_aggregation(aggr);
self.execs.push(exec);
}
if !self.order_by.is_empty() {
let mut exec = FreeDaemon::default();
exec.set_tp(ExecType::TypeTopN);
let mut topn = TopN::default();
topn.set_order_by(self.order_by.into());
if let Some(limit) = self.limit.take() {
topn.set_limit(limit);
}
exec.set_top_n(topn);
self.execs.push(exec);
}
if let Some(l) = self.limit.take() {
let mut exec = FreeDaemon::default();
exec.set_tp(ExecType::TypeLimit);
let mut limit = Limit::default();
limit.set_limit(l);
exec.set_limit(limit);
self.execs.push(exec);
}
let mut posetdag = PosetDagRequest::default();
posetdag.set_executors(self.execs.into());
posetdag.set_flags(flags.iter().fold(0, |acc, f| acc | *f));
posetdag.set_collect_cone_counts(true);
let output_offsets = if self.output_offsets.is_some() {
self.output_offsets.take().unwrap()
} else {
(0..self.cols.len() as u32).collect()
};
posetdag.set_output_offsets(output_offsets);
let mut req = Request::default();
req.set_spacelike_ts(next_id() as u64);
req.set_tp(REQ_TYPE_DAG);
req.set_data(posetdag.write_to_bytes().unwrap());
req.set_cones(vec![self.key_cone].into());
req.set_context(ctx);
req
}
}
pub struct DAGSolitonSpliter {
Solitons: Vec<Soliton>,
datums: Vec<Datum>,
col_cnt: usize,
}
impl DAGSolitonSpliter {
pub fn new(Solitons: Vec<Soliton>, col_cnt: usize) -> DAGSolitonSpliter {
DAGSolitonSpliter {
Solitons,
col_cnt,
datums: Vec::with_capacity(0),
}
}
}
impl Iteron for DAGSolitonSpliter {
type Item = Vec<Datum>;
fn next(&mut self) -> Option<Vec<Datum>> {
loop {
if self.Solitons.is_empty() && self.datums.is_empty() {
return None;
} else if self.datums.is_empty() {
let Soliton = self.Solitons.remove(0);
let mut data = Soliton.get_rows_data();
self.datums = datum::decode(&mut data).unwrap();
continue;
}
assert!(self.datums.len() >= self.col_cnt);
let mut cols = self.datums.split_off(self.col_cnt);
std::mem::swap(&mut self.datums, &mut cols);
return Some(cols);
}
}
}
| {
self.aggr_col(col, ExprType::AggBitXor)
} |
external.rs | use crate::prelude::*;
use nu_engine::{evaluate_baseline_expr, BufCodecReader};
use nu_engine::{MaybeTextCodec, StringOrBinary};
use nu_test_support::NATIVE_PATH_ENV_VAR;
use parking_lot::Mutex;
use std::io::Write;
use std::ops::Deref;
use std::process::{Command, Stdio};
use std::sync::mpsc;
use std::{borrow::Cow, io::BufReader};
use log::trace;
use nu_errors::ShellError;
use nu_protocol::hir::Expression;
use nu_protocol::hir::{ExternalCommand, ExternalRedirection};
use nu_protocol::{Primitive, ShellTypeName, UntaggedValue, Value};
use nu_source::Tag;
pub(crate) fn run_external_command(
command: ExternalCommand,
context: &mut EvaluationContext,
input: InputStream,
external_redirection: ExternalRedirection,
) -> Result<InputStream, ShellError> {
trace!(target: "nu::run::external", "-> {}", command.name);
context.sync_path_to_env();
if !context.host.lock().is_external_cmd(&command.name) {
return Err(ShellError::labeled_error(
"Command not found",
format!("command {} not found", &command.name),
&command.name_tag,
));
}
run_with_stdin(command, context, input, external_redirection)
}
fn run_with_stdin(
command: ExternalCommand,
context: &mut EvaluationContext,
input: InputStream,
external_redirection: ExternalRedirection,
) -> Result<InputStream, ShellError> {
let path = context.shell_manager.path();
let mut command_args = vec![];
for arg in command.args.iter() {
let is_literal = matches!(arg.expr, Expression::Literal(_));
let value = evaluate_baseline_expr(arg, context)?;
// Skip any arguments that don't really exist, treating them as optional
// FIXME: we may want to preserve the gap in the future, though it's hard to say
// what value we would put in its place.
if value.value.is_none() {
continue;
}
// Do the cleanup that we need to do on any argument going out:
match &value.value {
UntaggedValue::Table(table) => {
for t in table {
match &t.value {
UntaggedValue::Primitive(_) => {
command_args.push((
t.convert_to_string().trim_end_matches('\n').to_string(),
is_literal,
));
}
_ => {
return Err(ShellError::labeled_error(
"Could not convert to positional arguments",
"could not convert to positional arguments",
value.tag(),
));
}
}
}
}
_ => {
let trimmed_value_string = value.as_string()?.trim_end_matches('\n').to_string();
command_args.push((trimmed_value_string, is_literal));
}
}
}
let process_args = command_args
.iter()
.map(|(arg, _is_literal)| {
let home_dir;
#[cfg(feature = "dirs")]
{
home_dir = dirs_next::home_dir;
}
#[cfg(not(feature = "dirs"))]
{
home_dir = || Some(std::path::PathBuf::from("/"));
}
let arg = expand_tilde(arg.deref(), home_dir);
#[cfg(not(windows))]
{
if !_is_literal {
let escaped = escape_double_quotes(&arg);
add_double_quotes(&escaped)
} else {
arg.as_ref().to_string()
}
}
#[cfg(windows)]
{
if let Some(unquoted) = remove_quotes(&arg) {
unquoted.to_string()
} else {
arg.as_ref().to_string()
}
}
})
.collect::<Vec<String>>();
spawn(
&command,
&path,
&process_args[..],
input,
external_redirection,
&context.scope,
)
}
fn spawn(
command: &ExternalCommand,
path: &str,
args: &[String],
input: InputStream,
external_redirection: ExternalRedirection,
scope: &Scope,
) -> Result<InputStream, ShellError> {
let command = command.clone();
let mut process = {
#[cfg(windows)]
{
let mut process = Command::new("cmd");
process.arg("/c");
process.arg(&command.name);
for arg in args {
// Clean the args before we use them:
let arg = arg.replace("|", "\\|");
process.arg(&arg);
}
process
}
#[cfg(not(windows))]
{
let cmd_with_args = vec![command.name.clone(), args.join(" ")].join(" ");
let mut process = Command::new("sh");
process.arg("-c").arg(cmd_with_args);
process
}
};
process.current_dir(path);
trace!(target: "nu::run::external", "cwd = {:?}", &path);
process.env_clear();
process.envs(scope.get_env_vars());
// We want stdout regardless of what
// we are doing ($it case or pipe stdin)
match external_redirection {
ExternalRedirection::Stdout => {
process.stdout(Stdio::piped());
trace!(target: "nu::run::external", "set up stdout pipe");
}
ExternalRedirection::Stderr => {
process.stderr(Stdio::piped());
trace!(target: "nu::run::external", "set up stderr pipe");
}
ExternalRedirection::StdoutAndStderr => {
process.stdout(Stdio::piped());
trace!(target: "nu::run::external", "set up stdout pipe");
process.stderr(Stdio::piped());
trace!(target: "nu::run::external", "set up stderr pipe");
}
_ => {}
}
// open since we have some contents for stdin
if !input.is_empty() {
process.stdin(Stdio::piped());
trace!(target: "nu::run::external", "set up stdin pipe");
}
trace!(target: "nu::run::external", "built command {:?}", process);
// TODO Switch to async_std::process once it's stabilized
match process.spawn() {
Ok(mut child) => {
let (tx, rx) = mpsc::sync_channel(0);
let mut stdin = child.stdin.take();
let stdin_write_tx = tx.clone();
let stdout_read_tx = tx;
let stdin_name_tag = command.name_tag.clone();
let stdout_name_tag = command.name_tag;
std::thread::spawn(move || {
if !input.is_empty() {
let mut stdin_write = stdin
.take()
.expect("Internal error: could not get stdin pipe for external command");
for value in input {
match &value.value {
UntaggedValue::Primitive(Primitive::Nothing) => continue,
UntaggedValue::Primitive(Primitive::String(s)) => {
if stdin_write.write(s.as_bytes()).is_err() {
// Other side has closed, so exit
return Ok(());
}
}
UntaggedValue::Primitive(Primitive::Binary(b)) => {
if stdin_write.write(b).is_err() {
// Other side has closed, so exit
return Ok(());
}
}
unsupported => {
println!("Unsupported: {:?}", unsupported);
let _ = stdin_write_tx.send(Ok(Value {
value: UntaggedValue::Error(ShellError::labeled_error(
format!(
"Received unexpected type from pipeline ({})",
unsupported.type_name()
),
"expected a string",
stdin_name_tag.clone(),
)),
tag: stdin_name_tag,
}));
return Err(());
}
};
}
}
Ok(())
});
std::thread::spawn(move || {
if external_redirection == ExternalRedirection::Stdout
|| external_redirection == ExternalRedirection::StdoutAndStderr
{
let stdout = if let Some(stdout) = child.stdout.take() {
stdout
} else {
let _ = stdout_read_tx.send(Ok(Value {
value: UntaggedValue::Error(ShellError::labeled_error(
"Can't redirect the stdout for external command",
"can't redirect stdout",
&stdout_name_tag,
)),
tag: stdout_name_tag,
}));
return Err(());
};
// let file = futures::io::AllowStdIo::new(stdout);
// let stream = FramedRead::new(file, MaybeTextCodec::default());
let buf_read = BufReader::new(stdout);
let buf_codec = BufCodecReader::new(buf_read, MaybeTextCodec::default());
for line in buf_codec {
match line {
Ok(line) => match line {
StringOrBinary::String(s) => {
let result = stdout_read_tx.send(Ok(Value {
value: UntaggedValue::Primitive(Primitive::String(
s.clone(),
)),
tag: stdout_name_tag.clone(),
}));
if result.is_err() {
break;
}
}
StringOrBinary::Binary(b) => {
let result = stdout_read_tx.send(Ok(Value {
value: UntaggedValue::Primitive(Primitive::Binary(
b.into_iter().collect(),
)),
tag: stdout_name_tag.clone(),
}));
if result.is_err() {
break;
}
}
},
Err(e) => {
// If there's an exit status, it makes sense that we may error when
// trying to read from its stdout pipe (likely been closed). In that
// case, don't emit an error.
let should_error = match child.wait() {
Ok(exit_status) => !exit_status.success(),
Err(_) => true,
};
if should_error {
let _ = stdout_read_tx.send(Ok(Value {
value: UntaggedValue::Error(ShellError::labeled_error(
format!("Unable to read from stdout ({})", e),
"unable to read from stdout",
&stdout_name_tag,
)),
tag: stdout_name_tag.clone(),
}));
}
return Ok(());
}
}
}
}
if external_redirection == ExternalRedirection::Stderr
|| external_redirection == ExternalRedirection::StdoutAndStderr
{
let stderr = if let Some(stderr) = child.stderr.take() {
stderr
} else {
let _ = stdout_read_tx.send(Ok(Value {
value: UntaggedValue::Error(ShellError::labeled_error(
"Can't redirect the stderr for external command",
"can't redirect stderr",
&stdout_name_tag,
)),
tag: stdout_name_tag,
}));
return Err(());
};
// let file = futures::io::AllowStdIo::new(stderr);
// let stream = FramedRead::new(file, MaybeTextCodec::default());
let buf_reader = BufReader::new(stderr);
let buf_codec = BufCodecReader::new(buf_reader, MaybeTextCodec::default());
for line in buf_codec {
match line {
Ok(line) => match line {
StringOrBinary::String(s) => {
let result = stdout_read_tx.send(Ok(Value {
value: UntaggedValue::Error(
ShellError::untagged_runtime_error(s),
),
tag: stdout_name_tag.clone(),
}));
if result.is_err() {
break;
}
}
StringOrBinary::Binary(_) => {
let result = stdout_read_tx.send(Ok(Value {
value: UntaggedValue::Error(
ShellError::untagged_runtime_error("<binary stderr>"),
),
tag: stdout_name_tag.clone(),
}));
if result.is_err() {
break;
}
}
},
Err(e) => {
// If there's an exit status, it makes sense that we may error when
// trying to read from its stdout pipe (likely been closed). In that
// case, don't emit an error.
let should_error = match child.wait() {
Ok(exit_status) => !exit_status.success(),
Err(_) => true,
};
if should_error {
let _ = stdout_read_tx.send(Ok(Value {
value: UntaggedValue::Error(ShellError::labeled_error(
format!("Unable to read from stderr ({})", e),
"unable to read from stderr",
&stdout_name_tag,
)),
tag: stdout_name_tag.clone(),
}));
}
return Ok(());
}
}
}
}
// We can give an error when we see a non-zero exit code, but this is different
// than what other shells will do.
let external_failed = match child.wait() {
Err(_) => true,
Ok(exit_status) => !exit_status.success(),
};
if external_failed {
let cfg = nu_data::config::config(Tag::unknown());
if let Ok(cfg) = cfg {
if cfg.contains_key("nonzero_exit_errors") {
let _ = stdout_read_tx.send(Ok(Value {
value: UntaggedValue::Error(ShellError::labeled_error(
"External command failed",
"command failed",
&stdout_name_tag,
)),
tag: stdout_name_tag.clone(),
}));
}
}
let _ = stdout_read_tx.send(Ok(Value {
value: UntaggedValue::Error(ShellError::external_non_zero()),
tag: stdout_name_tag,
}));
}
Ok(())
});
let stream = ChannelReceiver::new(rx);
Ok(stream.to_input_stream())
}
Err(e) => Err(ShellError::labeled_error(
format!("{}", e),
"failed to spawn",
&command.name_tag,
)),
}
}
struct ChannelReceiver {
rx: Arc<Mutex<mpsc::Receiver<Result<Value, ShellError>>>>,
}
impl ChannelReceiver {
pub fn new(rx: mpsc::Receiver<Result<Value, ShellError>>) -> Self {
Self {
rx: Arc::new(Mutex::new(rx)),
}
}
}
impl Iterator for ChannelReceiver {
type Item = Result<Value, ShellError>;
fn next(&mut self) -> Option<Self::Item> {
let rx = self.rx.lock();
match rx.recv() {
Ok(v) => Some(v),
Err(_) => None,
}
}
}
fn expand_tilde<SI: ?Sized, P, HD>(input: &SI, home_dir: HD) -> std::borrow::Cow<str>
where
SI: AsRef<str>,
P: AsRef<std::path::Path>,
HD: FnOnce() -> Option<P>,
{
shellexpand::tilde_with_context(input, home_dir)
}
fn argument_is_quoted(argument: &str) -> bool {
if argument.len() < 2 {
return false;
}
(argument.starts_with('"') && argument.ends_with('"'))
|| (argument.starts_with('\'') && argument.ends_with('\''))
}
#[allow(unused)]
fn add_double_quotes(argument: &str) -> String |
#[allow(unused)]
fn escape_double_quotes(argument: &str) -> Cow<'_, str> {
// allocate new string only if required
if argument.contains('"') {
Cow::Owned(argument.replace('"', r#"\""#))
} else {
Cow::Borrowed(argument)
}
}
#[allow(unused)]
fn remove_quotes(argument: &str) -> Option<&str> {
if !argument_is_quoted(argument) {
return None;
}
let size = argument.len();
Some(&argument[1..size - 1])
}
#[allow(unused)]
fn shell_os_paths() -> Vec<std::path::PathBuf> {
let mut original_paths = vec![];
if let Some(paths) = std::env::var_os(NATIVE_PATH_ENV_VAR) {
original_paths = std::env::split_paths(&paths).collect::<Vec<_>>();
}
original_paths
}
#[cfg(test)]
mod tests {
use super::{
add_double_quotes, argument_is_quoted, escape_double_quotes, expand_tilde, remove_quotes,
};
#[cfg(feature = "which")]
use super::{run_external_command, InputStream};
#[cfg(feature = "which")]
use nu_engine::EvaluationContext;
#[cfg(feature = "which")]
use nu_test_support::commands::ExternalBuilder;
// fn read(mut stream: OutputStream) -> Option<Value> {
// match stream.try_next() {
// Ok(val) => {
// if let Some(val) = val {
// val.raw_value()
// } else {
// None
// }
// }
// Err(_) => None,
// }
// }
#[cfg(feature = "which")]
fn non_existent_run() {
use nu_protocol::hir::ExternalRedirection;
let cmd = ExternalBuilder::for_name("i_dont_exist.exe").build();
let input = InputStream::empty();
let mut ctx = EvaluationContext::basic();
assert!(run_external_command(cmd, &mut ctx, input, ExternalRedirection::Stdout).is_err());
}
// fn failure_run() -> Result<(), ShellError> {
// let cmd = ExternalBuilder::for_name("fail").build();
// let mut ctx = crate::cli::EvaluationContext::basic().expect("There was a problem creating a basic context.");
// let stream = run_external_command(cmd, &mut ctx, None, false)
// ?
// .expect("There was a problem running the external command.");
// match read(stream.into()) {
// Some(Value {
// value: UntaggedValue::Error(_),
// ..
// }) => {}
// None | _ => panic!("Command didn't fail."),
// }
// Ok(())
// }
// #[test]
// fn identifies_command_failed() -> Result<(), ShellError> {
// block_on(failure_run())
// }
#[cfg(feature = "which")]
#[test]
fn identifies_command_not_found() {
non_existent_run()
}
#[test]
fn checks_escape_double_quotes() {
assert_eq!(escape_double_quotes("andrés"), "andrés");
assert_eq!(escape_double_quotes(r#"an"drés"#), r#"an\"drés"#);
assert_eq!(escape_double_quotes(r#""an"drés""#), r#"\"an\"drés\""#);
}
#[test]
fn checks_quotes_from_argument_to_be_passed_in() {
assert_eq!(argument_is_quoted(""), false);
assert_eq!(argument_is_quoted("'"), false);
assert_eq!(argument_is_quoted("'a"), false);
assert_eq!(argument_is_quoted("a"), false);
assert_eq!(argument_is_quoted("a'"), false);
assert_eq!(argument_is_quoted("''"), true);
assert_eq!(argument_is_quoted(r#"""#), false);
assert_eq!(argument_is_quoted(r#""a"#), false);
assert_eq!(argument_is_quoted(r#"a"#), false);
assert_eq!(argument_is_quoted(r#"a""#), false);
assert_eq!(argument_is_quoted(r#""""#), true);
assert_eq!(argument_is_quoted("'andrés"), false);
assert_eq!(argument_is_quoted("andrés'"), false);
assert_eq!(argument_is_quoted(r#""andrés"#), false);
assert_eq!(argument_is_quoted(r#"andrés""#), false);
assert_eq!(argument_is_quoted("'andrés'"), true);
assert_eq!(argument_is_quoted(r#""andrés""#), true);
}
#[test]
fn adds_double_quotes_to_argument_to_be_passed_in() {
assert_eq!(add_double_quotes("andrés"), "\"andrés\"");
}
#[test]
fn strips_quotes_from_argument_to_be_passed_in() {
assert_eq!(remove_quotes(""), None);
assert_eq!(remove_quotes("'"), None);
assert_eq!(remove_quotes("'a"), None);
assert_eq!(remove_quotes("a"), None);
assert_eq!(remove_quotes("a'"), None);
assert_eq!(remove_quotes("''"), Some(""));
assert_eq!(remove_quotes(r#"""#), None);
assert_eq!(remove_quotes(r#""a"#), None);
assert_eq!(remove_quotes(r#"a"#), None);
assert_eq!(remove_quotes(r#"a""#), None);
assert_eq!(remove_quotes(r#""""#), Some(""));
assert_eq!(remove_quotes("'andrés"), None);
assert_eq!(remove_quotes("andrés'"), None);
assert_eq!(remove_quotes(r#""andrés"#), None);
assert_eq!(remove_quotes(r#"andrés""#), None);
assert_eq!(remove_quotes("'andrés'"), Some("andrés"));
assert_eq!(remove_quotes(r#""andrés""#), Some("andrés"));
}
#[test]
fn expands_tilde_if_starts_with_tilde_character() {
assert_eq!(
expand_tilde("~", || Some(std::path::Path::new("the_path_to_nu_light"))),
"the_path_to_nu_light"
);
}
#[test]
fn does_not_expand_tilde_if_tilde_is_not_first_character() {
assert_eq!(
expand_tilde("1~1", || Some(std::path::Path::new("the_path_to_nu_light"))),
"1~1"
);
}
}
| {
format!("\"{}\"", argument)
} |
test-chrome-tabs-api.js | 'use strict';
const tabs = require('sdk/tabs');
const chromeTabs = require('chrome-tabs-api');
exports['test '] = function(assert, done) {
var url = 'data:text/html,' + encodeURIComponent('<title>Test</title>');
tabs.open({
url: url, | }
});
function testSdkTab(tab) {
var chromeTab = chromeTabs.toChromeTab(tab);
assert.equal(tab.url, chromeTab.url, 'Tab\'s "url" property must be equal');
assert.equal(false, chromeTab.pinned, 'Tab\'s "pinned" property must be false');
var ffTab = chromeTabs.toFirefoxTab(chromeTab);
assert.equal(tab, ffTab, 'toFirefoxTab(toChromeTab(tab)) === tab');
done();
}
};
require('sdk/test').run(exports); | onReady: function(tab) {
assert.ok(tab, 'SDK Tab exists'); // Pre-requisite
testSdkTab(tab);
testSdkTab = function() {}; // Run once |
nsenter_mount.go | // +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nsenter
import (
"fmt"
"os"
"path/filepath"
"strings"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/utils/nsenter"
utilpath "k8s.io/utils/path"
)
const (
// hostProcMountsPath is the default mount path for rootfs
hostProcMountsPath = "/rootfs/proc/1/mounts"
// hostProcMountinfoPath is the default mount info path for rootfs
hostProcMountinfoPath = "/rootfs/proc/1/mountinfo"
)
// Mounter implements mount.Interface
// Currently, all docker containers receive their own mount namespaces.
// Mounter works by executing nsenter to run commands in
// the host's mount namespace.
type Mounter struct {
ne *nsenter.Nsenter
// rootDir is location of /var/lib/kubelet directory.
rootDir string
}
// NewMounter creates a new mounter for kubelet that runs as a container.
func NewMounter(rootDir string, ne *nsenter.Nsenter) *Mounter {
return &Mounter{
rootDir: rootDir,
ne: ne,
}
}
// Mounter implements mount.Interface
var _ = mount.Interface(&Mounter{})
// Mount runs mount(8) in the host's root mount namespace. Aside from this
// aspect, Mount has the same semantics as the mounter returned by mount.New()
func (n *Mounter) Mount(source string, target string, fstype string, options []string) error {
bind, bindOpts, bindRemountOpts := mount.IsBind(options)
if bind {
err := n.doNsenterMount(source, target, fstype, bindOpts)
if err != nil {
return err
}
return n.doNsenterMount(source, target, fstype, bindRemountOpts)
}
return n.doNsenterMount(source, target, fstype, options)
}
// doNsenterMount nsenters the host's mount namespace and performs the
// requested mount.
func (n *Mounter) doNsenterMount(source, target, fstype string, options []string) error {
klog.V(5).Infof("nsenter mount %s %s %s %v", source, target, fstype, options)
cmd, args := n.makeNsenterArgs(source, target, fstype, options)
outputBytes, err := n.ne.Exec(cmd, args).CombinedOutput()
if len(outputBytes) != 0 {
klog.V(5).Infof("Output of mounting %s to %s: %v", source, target, string(outputBytes))
}
return err
}
// makeNsenterArgs makes a list of argument to nsenter in order to do the
// requested mount.
func (n *Mounter) makeNsenterArgs(source, target, fstype string, options []string) (string, []string) {
mountCmd := n.ne.AbsHostPath("mount")
mountArgs := mount.MakeMountArgs(source, target, fstype, options)
if systemdRunPath, hasSystemd := n.ne.SupportsSystemd(); hasSystemd {
// Complete command line:
// nsenter --mount=/rootfs/proc/1/ns/mnt -- /bin/systemd-run --description=... --scope -- /bin/mount -t <type> <what> <where>
// Expected flow is:
// * nsenter breaks out of container's mount namespace and executes
// host's systemd-run.
// * systemd-run creates a transient scope (=~ cgroup) and executes its
// argument (/bin/mount) there.
// * mount does its job, forks a fuse daemon if necessary and finishes.
// (systemd-run --scope finishes at this point, returning mount's exit
// code and stdout/stderr - that's one of --scope's benefits).
// * systemd keeps the fuse daemon running in the scope (i.e. in its own
// cgroup) until the fuse daemon dies (another --scope benefit).
// Kubelet container can be restarted and the fuse daemon survives.
// * When the daemon dies (e.g. during unmount) systemd removes the
// scope automatically.
mountCmd, mountArgs = mount.AddSystemdScope(systemdRunPath, target, mountCmd, mountArgs)
} else {
// Fall back to simple mount when the host has no systemd.
// Complete command line:
// nsenter --mount=/rootfs/proc/1/ns/mnt -- /bin/mount -t <type> <what> <where>
// Expected flow is:
// * nsenter breaks out of container's mount namespace and executes host's /bin/mount.
// * mount does its job, forks a fuse daemon if necessary and finishes.
// * Any fuse daemon runs in cgroup of kubelet docker container,
// restart of kubelet container will kill it!
// No code here, mountCmd and mountArgs use /bin/mount
}
return mountCmd, mountArgs
}
// Unmount runs umount(8) in the host's mount namespace.
func (n *Mounter) Unmount(target string) error {
args := []string{target}
// No need to execute systemd-run here, it's enough that unmount is executed
// in the host's mount namespace. It will finish appropriate fuse daemon(s)
// running in any scope.
klog.V(5).Infof("nsenter unmount args: %v", args)
outputBytes, err := n.ne.Exec("umount", args).CombinedOutput()
if len(outputBytes) != 0 {
klog.V(5).Infof("Output of unmounting %s: %v", target, string(outputBytes))
}
return err
}
// List returns a list of all mounted filesystems in the host's mount namespace.
func (*Mounter) List() ([]mount.MountPoint, error) {
return mount.ListProcMounts(hostProcMountsPath)
}
// IsMountPointMatch tests if dir and mp are the same path
func (*Mounter) IsMountPointMatch(mp mount.MountPoint, dir string) bool {
deletedDir := fmt.Sprintf("%s\\040(deleted)", dir)
return (mp.Path == dir) || (mp.Path == deletedDir)
}
// IsLikelyNotMountPoint determines whether a path is a mountpoint by calling findmnt
// in the host's root mount namespace.
func (n *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
file, err := filepath.Abs(file)
if err != nil {
return true, err
}
// Check the directory exists
if _, err = os.Stat(file); os.IsNotExist(err) {
klog.V(5).Infof("findmnt: directory %s does not exist", file)
return true, err
}
// Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts
resolvedFile, err := n.EvalHostSymlinks(file)
if err != nil {
return true, err
}
// Add --first-only option: since we are testing for the absence of a mountpoint, it is sufficient to get only
// the first of multiple possible mountpoints using --first-only.
// Also add fstype output to make sure that the output of target file will give the full path
// TODO: Need more refactoring for this function. Track the solution with issue #26996
args := []string{"-o", "target,fstype", "--noheadings", "--first-only", "--target", resolvedFile}
klog.V(5).Infof("nsenter findmnt args: %v", args)
out, err := n.ne.Exec("findmnt", args).CombinedOutput()
if err != nil {
klog.V(2).Infof("Failed findmnt command for path %s: %s %v", resolvedFile, out, err)
// Different operating systems behave differently for paths which are not mount points.
		// On older versions (e.g. 2.20.1) we'd get an error, on newer ones (e.g. 2.26.2) we'd get "/".
// It's safer to assume that it's not a mount point.
return true, nil
}
mountTarget, err := parseFindMnt(string(out))
if err != nil {
return false, err
}
klog.V(5).Infof("IsLikelyNotMountPoint findmnt output for path %s: %v:", resolvedFile, mountTarget)
if mountTarget == resolvedFile {
klog.V(5).Infof("IsLikelyNotMountPoint: %s is a mount point", resolvedFile)
return false, nil
}
klog.V(5).Infof("IsLikelyNotMountPoint: %s is not a mount point", resolvedFile)
return true, nil
}
// parse output of "findmnt -o target,fstype" and return just the target
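// For example (illustrative path), the output "/mnt/disks/vol1 ext4\n" yields "/mnt/disks/vol1".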
func parseFindMnt(out string) (string, error) |
// DeviceOpened checks if block device in use by calling Open with O_EXCL flag.
// Returns true if open returns errno EBUSY, and false if errno is nil.
// Returns an error if errno is any error other than EBUSY.
// Returns an error if pathname is not a device.
func (n *Mounter) DeviceOpened(pathname string) (bool, error) {
return mount.ExclusiveOpenFailsOnDevice(pathname)
}
// PathIsDevice uses FileInfo returned from os.Stat to check if path refers
// to a device.
func (n *Mounter) PathIsDevice(pathname string) (bool, error) {
pathType, err := n.GetFileType(pathname)
isDevice := pathType == mount.FileTypeCharDev || pathType == mount.FileTypeBlockDev
return isDevice, err
}
// GetDeviceNameFromMount, given a mount point, finds the volume id by checking /proc/mounts
func (n *Mounter) GetDeviceNameFromMount(mountPath, pluginMountDir string) (string, error) {
return mount.GetDeviceNameFromMountLinux(n, mountPath, pluginMountDir)
}
// MakeRShared checks if path is shared and bind-mounts it as rshared if needed.
func (n *Mounter) MakeRShared(path string) error {
return mount.DoMakeRShared(path, hostProcMountinfoPath)
}
// GetFileType checks for file/directory/socket/block/character devices.
func (n *Mounter) GetFileType(pathname string) (mount.FileType, error) {
var pathType mount.FileType
outputBytes, err := n.ne.Exec("stat", []string{"-L", "--printf=%F", pathname}).CombinedOutput()
if err != nil {
if strings.Contains(string(outputBytes), "No such file") {
err = fmt.Errorf("%s does not exist", pathname)
} else {
err = fmt.Errorf("stat %s error: %v", pathname, string(outputBytes))
}
return pathType, err
}
switch string(outputBytes) {
case "socket":
return mount.FileTypeSocket, nil
case "character special file":
return mount.FileTypeCharDev, nil
case "block special file":
return mount.FileTypeBlockDev, nil
case "directory":
return mount.FileTypeDirectory, nil
case "regular file", "regular empty file":
return mount.FileTypeFile, nil
}
return pathType, fmt.Errorf("only recognise file, directory, socket, block device and character device")
}
// MakeDir creates a new directory.
func (n *Mounter) MakeDir(pathname string) error {
args := []string{"-p", pathname}
if _, err := n.ne.Exec("mkdir", args).CombinedOutput(); err != nil {
return err
}
return nil
}
// MakeFile creates an empty file.
func (n *Mounter) MakeFile(pathname string) error {
args := []string{pathname}
if _, err := n.ne.Exec("touch", args).CombinedOutput(); err != nil {
return err
}
return nil
}
// ExistsPath checks if pathname exists.
// Error is returned on any other error than "file not found".
func (n *Mounter) ExistsPath(pathname string) (bool, error) {
// Resolve the symlinks but allow the target not to exist. EvalSymlinks
	// would return a generic error when the target does not exist.
hostPath, err := n.ne.EvalSymlinks(pathname, false /* mustExist */)
if err != nil {
return false, err
}
kubeletpath := n.ne.KubeletPath(hostPath)
return utilpath.Exists(utilpath.CheckFollowSymlink, kubeletpath)
}
// EvalHostSymlinks returns the path name after evaluating symlinks.
func (n *Mounter) EvalHostSymlinks(pathname string) (string, error) {
return n.ne.EvalSymlinks(pathname, true)
}
// GetMountRefs finds all mount references to the path, returns a
// list of paths. Path could be a mountpoint path, device or a normal
// directory (for bind mount).
func (n *Mounter) GetMountRefs(pathname string) ([]string, error) {
pathExists, pathErr := mount.PathExists(pathname)
if !pathExists || mount.IsCorruptedMnt(pathErr) {
return []string{}, nil
} else if pathErr != nil {
return nil, fmt.Errorf("Error checking path %s: %v", pathname, pathErr)
}
hostpath, err := n.ne.EvalSymlinks(pathname, true /* mustExist */)
if err != nil {
return nil, err
}
return mount.SearchMountPoints(hostpath, hostProcMountinfoPath)
}
// GetFSGroup returns FSGroup of pathname.
func (n *Mounter) GetFSGroup(pathname string) (int64, error) {
hostPath, err := n.ne.EvalSymlinks(pathname, true /* mustExist */)
if err != nil {
return -1, err
}
kubeletpath := n.ne.KubeletPath(hostPath)
return mount.GetFSGroupLinux(kubeletpath)
}
// GetSELinuxSupport tests if pathname is on a mount that supports SELinux.
func (n *Mounter) GetSELinuxSupport(pathname string) (bool, error) {
return mount.GetSELinux(pathname, hostProcMountsPath)
}
// GetMode returns permissions of pathname.
func (n *Mounter) GetMode(pathname string) (os.FileMode, error) {
hostPath, err := n.ne.EvalSymlinks(pathname, true /* mustExist */)
if err != nil {
return 0, err
}
kubeletpath := n.ne.KubeletPath(hostPath)
return mount.GetModeLinux(kubeletpath)
}
| {
// cut trailing newline
out = strings.TrimSuffix(out, "\n")
// cut everything after the last space - it's the filesystem type
i := strings.LastIndex(out, " ")
if i == -1 {
return "", fmt.Errorf("error parsing findmnt output, expected at least one space: %q", out)
}
return out[:i], nil
} |
switch.py | """Contains the Switch parent class."""
import asyncio
from functools import partial
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.machine import MachineController
from mpf.core.system_wide_device import SystemWideDevice
from mpf.core.utility_functions import Util
from mpf.core.platform import SwitchConfig
from mpf.devices.device_mixins import DevicePositionMixin
MYPY = False
if MYPY: # pragma: no cover
from mpf.platforms.interfaces.switch_platform_interface import SwitchPlatformInterface
from mpf.core.platform import SwitchPlatform
@DeviceMonitor("state", "recycle_jitter_count")
class Switch(SystemWideDevice, DevicePositionMixin):
"""A switch in a pinball machine."""
config_section = 'switches'
collection = 'switches'
class_label = 'switch'
__slots__ = ["hw_switch", "platform", "state", "hw_state", "invert", "recycle_secs", "recycle_clear_time",
"recycle_jitter_count", "_events_to_post", "last_change"]
def __init__(self, machine: MachineController, name: str) -> None:
"""Initialise switch."""
self.hw_switch = None # type: SwitchPlatformInterface
self.platform = None # type: SwitchPlatform
super().__init__(machine, name)
self.state = 0
""" The logical state of a switch. 1 = active, 0 = inactive. This takes
into consideration the NC or NO settings for the switch."""
self.hw_state = 0
""" The physical hardware state of the switch. 1 = active,
0 = inactive. This is what the actual hardware is reporting and does
not consider whether a switch is NC or NO."""
self.invert = 0
self.recycle_secs = 0
self.recycle_clear_time = None
self.recycle_jitter_count = 0
self._events_to_post = {0: [], 1: []}
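        # Maps a switch state (0 = inactive, 1 = active) to the list of event names
        # posted when the switch changes to that state.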
self.last_change = -100000
# register switch so other devices can add handlers to it
self.machine.switch_controller.register_switch(self)
@classmethod
def device_class_init(cls, machine: MachineController):
"""Register handler for duplicate switch number checks."""
machine.events.add_handler("init_phase_4",
cls._check_duplicate_switch_numbers,
machine=machine)
@staticmethod
def _check_duplicate_switch_numbers(machine, **kwargs):
del kwargs
check_set = set()
for switch in machine.switches:
key = (switch.platform, switch.hw_switch.number)
if key in check_set:
raise AssertionError(
"Duplicate switch number {} for switch {}".format(
switch.hw_switch.number, switch))
check_set.add(key)
def validate_and_parse_config(self, config, is_mode_config, debug_prefix: str = None):
"""Validate switch config."""
config = super().validate_and_parse_config(config, is_mode_config, debug_prefix)
platform = self.machine.get_platform_sections(
'switches', getattr(config, "platform", None))
config['platform_settings'] = platform.validate_switch_section(
self, config.get('platform_settings', None))
self._configure_device_logging(config) |
def _create_activation_event(self, event_str: str, state: int):
if "|" in event_str:
event, ev_time = event_str.split("|")
ms = Util.string_to_ms(ev_time)
self.machine.switch_controller.add_switch_handler(
switch_name=self.name,
state=state,
callback=partial(self.machine.events.post, event=event),
ms=ms
)
else:
self._events_to_post[state].append(event_str)
def _recycle_passed(self, state):
self.recycle_clear_time = None
# only post event if the switch toggled
if self.state != state:
self._post_events(self.state)
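    # While a recycle window is open, additional state changes are swallowed; once the
    # window expires, _recycle_passed() posts events only if the state actually toggled.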
def _post_events_with_recycle(self, state):
# if recycle is ongoing do nothing
if not self.recycle_clear_time:
# calculate clear time
self.recycle_clear_time = self.machine.clock.get_time() + self.recycle_secs
self.machine.clock.loop.call_at(self.recycle_clear_time, partial(self._recycle_passed, state))
# post event
self._post_events(state)
def _post_events(self, state):
for event in self._events_to_post[state]:
if self.machine.events.does_event_exist(event):
self.machine.events.post(event)
@asyncio.coroutine
def _initialize(self):
yield from super()._initialize()
self.platform = self.machine.get_platform_sections(
'switches', self.config['platform'])
if self.config['type'].upper() == 'NC':
self.invert = 1
self.recycle_secs = self.config['ignore_window_ms'] / 1000.0
config = SwitchConfig(invert=self.invert,
debounce=self.config['debounce'])
try:
self.hw_switch = self.platform.configure_switch(
self.config['number'], config, self.config['platform_settings'])
except AssertionError as e:
raise AssertionError("Failed to configure switch {} in platform. See error above".format(self.name)) from e
if self.recycle_secs:
self.add_handler(state=1, callback=self._post_events_with_recycle, callback_kwargs={"state": 1})
self.add_handler(state=0, callback=self._post_events_with_recycle, callback_kwargs={"state": 0})
else:
self.add_handler(state=1, callback=self._post_events, callback_kwargs={"state": 1})
self.add_handler(state=0, callback=self._post_events, callback_kwargs={"state": 0})
if self.machine.config['mpf']['auto_create_switch_events']:
self._create_activation_event(
self.machine.config['mpf']['switch_event_active'].replace(
'%', self.name), 1)
self._create_activation_event(
self.machine.config['mpf']['switch_event_inactive'].replace(
'%', self.name), 0)
for tag in self.tags:
self._create_activation_event(
self.machine.config['mpf']['switch_tag_event'].replace(
'%', tag), 1)
self._create_activation_event(
self.machine.config['mpf']['switch_tag_event'].replace(
'%', tag) + "_active", 1)
self._create_activation_event(
self.machine.config['mpf']['switch_tag_event'].replace(
'%', tag) + "_inactive", 0)
for event in Util.string_to_lowercase_list(
self.config['events_when_activated']):
self._create_activation_event(event, 1)
for event in Util.string_to_lowercase_list(
self.config['events_when_deactivated']):
self._create_activation_event(event, 0)
# pylint: disable-msg=too-many-arguments
def add_handler(self, callback, state=1, ms=0, return_info=False,
callback_kwargs=None):
"""Add switch handler (callback) for this switch which is called when this switch state changes.
Note that this method just calls the
:doc:`Switch Controller's <self.machine.switch_controller>`
``add_switch_handler()`` method behind the scenes.
Args:
callback: A callable method that will be called when the switch
state changes.
            state: The state the switch must change into to trigger the
                callback. Values are 0 or 1, with 0 meaning
the switch changed to inactive, and 1 meaning the switch
changed to an active state.
ms: How many milliseconds the switch needs to be in the new state
before the callback is called. Default is 0 which means that
the callback will be called immediately. You can use this
setting as a form of software debounce, as the switch needs to
be in the state consistently before the callback is called.
return_info: If True, the switch controller will pass the
parameters of the switch handler as arguments to the callback,
including switch_name, state, and ms.
callback_kwargs: Additional kwargs that will be passed with the
callback.
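
        Example (illustrative; ``my_callback`` is a placeholder)::

            switch.add_handler(my_callback, state=1, ms=100)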
"""
return self.machine.switch_controller.add_switch_handler(
self.name, callback, state, ms, return_info, callback_kwargs)
def remove_handler(self, callback, state=1, ms=0):
"""Remove switch handler for this switch."""
return self.machine.switch_controller.remove_switch_handler(
self.name, callback, state, ms) | return config |
protocol_gen_test.go | package protocol
// NOTE: THIS FILE WAS PRODUCED BY THE
// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
// DO NOT EDIT
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalEventData(t *testing.T) {
v := EventData{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgEventData(b *testing.B) {
v := EventData{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgEventData(b *testing.B) {
v := EventData{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalEventData(b *testing.B) {
v := EventData{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeEventData(t *testing.T) {
v := EventData{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
}
vn := EventData{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeEventData(b *testing.B) {
v := EventData{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeEventData(b *testing.B) {
v := EventData{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalTopicInfo(t *testing.T) {
v := TopicInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgTopicInfo(b *testing.B) {
v := TopicInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgTopicInfo(b *testing.B) {
v := TopicInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalTopicInfo(b *testing.B) {
v := TopicInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeTopicInfo(t *testing.T) {
v := TopicInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
}
vn := TopicInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
} | v := TopicInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeTopicInfo(b *testing.B) {
v := TopicInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
} |
func BenchmarkEncodeTopicInfo(b *testing.B) { |
operations.rs | #![doc = "generated by AutoRust"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
#![allow(clippy::redundant_clone)]
use super::models;
#[derive(Clone)]
pub struct Client {
endpoint: String,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
pipeline: azure_core::Pipeline,
}
#[derive(Clone)]
pub struct ClientBuilder {
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
endpoint: Option<String>,
scopes: Option<Vec<String>>,
}
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
impl Client {
pub(crate) fn endpoint(&self) -> &str {
self.endpoint.as_str()
}
pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential {
self.credential.as_ref()
}
pub(crate) fn scopes(&self) -> Vec<&str> {
self.scopes.iter().map(String::as_str).collect()
}
pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> azure_core::error::Result<azure_core::Response> {
let mut context = azure_core::Context::default();
let mut request = request.into();
self.pipeline.send(&mut context, &mut request).await
}
pub fn new(
endpoint: impl Into<String>,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
) -> Self {
let endpoint = endpoint.into();
let pipeline = azure_core::Pipeline::new(
option_env!("CARGO_PKG_NAME"),
option_env!("CARGO_PKG_VERSION"),
azure_core::ClientOptions::default(),
Vec::new(),
Vec::new(),
);
Self {
endpoint,
credential,
scopes,
pipeline,
}
}
pub fn asc_operations(&self) -> asc_operations::Client {
asc_operations::Client(self.clone())
}
pub fn caches(&self) -> caches::Client {
caches::Client(self.clone())
}
pub fn operations(&self) -> operations::Client {
operations::Client(self.clone())
}
pub fn skus(&self) -> skus::Client {
skus::Client(self.clone())
}
pub fn storage_targets(&self) -> storage_targets::Client {
storage_targets::Client(self.clone())
}
pub fn usage_models(&self) -> usage_models::Client {
usage_models::Client(self.clone())
}
}
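// Illustrative usage sketch (assumed; `azure_identity` and the credential shown are
// not part of this generated file and may differ in your setup):
//   let credential = std::sync::Arc::new(azure_identity::DefaultAzureCredential::default());
//   let client = ClientBuilder::new(credential).build();
//   let mut caches_pages = client.caches().list("<subscription-id>").into_stream();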
pub mod operations {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self) -> list::Builder {
list::Builder { client: self.0.clone() }
}
}
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ApiOperationListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!("{}/providers/Microsoft.StorageCache/operations", this.client.endpoint(),);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ApiOperationListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
}
pub mod skus {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
}
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ResourceSkusResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.StorageCache/skus",
this.client.endpoint(),
&this.subscription_id
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ResourceSkusResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
}
pub mod usage_models {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
}
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::UsageModelsResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.StorageCache/usageModels",
this.client.endpoint(),
&this.subscription_id
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::UsageModelsResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
}
pub mod asc_operations {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
subscription_id: impl Into<String>,
location: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
location: location.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
use azure_core::error::ResultExt;
type Response = models::AscOperation;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) location: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.StorageCache/locations/{}/ascOperations/{}",
this.client.endpoint(),
&this.subscription_id,
&this.location,
&this.operation_id
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::AscOperation = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
}
pub mod caches {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
pub fn list_by_resource_group(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn get(
&self,
resource_group_name: impl Into<String>,
cache_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
cache_name: cache_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn create_or_update(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
cache: None,
}
}
pub fn update(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
cache: None,
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
cache_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
cache_name: cache_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn debug_info(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
) -> debug_info::Builder {
debug_info::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
}
}
pub fn flush(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
) -> flush::Builder {
flush::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
}
}
pub fn start(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
) -> start::Builder {
start::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
}
}
pub fn stop(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
) -> stop::Builder {
stop::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
}
}
pub fn upgrade_firmware(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
) -> upgrade_firmware::Builder {
upgrade_firmware::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
}
}
}
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::CachesListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.StorageCache/caches",
this.client.endpoint(),
&this.subscription_id
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::CachesListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
pub mod list_by_resource_group {
use super::models;
use azure_core::error::ResultExt;
type Response = models::CachesListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::CachesListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
pub mod get {
use super::models;
use azure_core::error::ResultExt;
type Response = models::Cache;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cache_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> |
}
}
pub mod create_or_update {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200(models::Cache),
Created201(models::Cache),
Accepted202,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
pub(crate) cache: Option<models::Cache>,
}
impl Builder {
pub fn cache(mut self, cache: impl Into<models::Cache>) -> Self {
self.cache = Some(cache.into());
self
}
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(cache) = &this.cache {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(cache)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::Cache = serde_json::from_slice(&rsp_body)?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::Cache = serde_json::from_slice(&rsp_body)?;
Ok(Response::Created201(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod update {
use super::models;
use azure_core::error::ResultExt;
type Response = models::Cache;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
pub(crate) cache: Option<models::Cache>,
}
impl Builder {
pub fn cache(mut self, cache: impl Into<models::Cache>) -> Self {
self.cache = Some(cache.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(cache) = &this.cache {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(cache)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::Cache = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod delete {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) cache_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod debug_info {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}/debugInfo",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod flush {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}/flush",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod start {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}/start",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod stop {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}/stop",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod upgrade_firmware {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Created201,
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}/upgrade",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::CREATED => Ok(Response::Created201),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
}
pub mod storage_targets {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn dns_refresh(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
storage_target_name: impl Into<String>,
) -> dns_refresh::Builder {
dns_refresh::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
storage_target_name: storage_target_name.into(),
}
}
pub fn list_by_cache(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
) -> list_by_cache::Builder {
list_by_cache::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
}
}
pub fn get(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
storage_target_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
storage_target_name: storage_target_name.into(),
}
}
pub fn create_or_update(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
storage_target_name: impl Into<String>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
storage_target_name: storage_target_name.into(),
storagetarget: None,
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
cache_name: impl Into<String>,
storage_target_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
cache_name: cache_name.into(),
storage_target_name: storage_target_name.into(),
}
}
}
pub mod dns_refresh {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
pub(crate) storage_target_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}/storageTargets/{}/dnsRefresh",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name,
&this.storage_target_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod list_by_cache {
use super::models;
use azure_core::error::ResultExt;
type Response = models::StorageTargetsResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}/storageTargets",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::StorageTargetsResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
pub mod get {
use super::models;
use azure_core::error::ResultExt;
type Response = models::StorageTarget;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
pub(crate) storage_target_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}/storageTargets/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name,
&this.storage_target_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::StorageTarget = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200(models::StorageTarget),
Created201(models::StorageTarget),
Accepted202,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
pub(crate) storage_target_name: String,
pub(crate) storagetarget: Option<models::StorageTarget>,
}
impl Builder {
pub fn storagetarget(mut self, storagetarget: impl Into<models::StorageTarget>) -> Self {
self.storagetarget = Some(storagetarget.into());
self
}
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}/storageTargets/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name,
&this.storage_target_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = if let Some(storagetarget) = &this.storagetarget {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(storagetarget)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::StorageTarget = serde_json::from_slice(&rsp_body)?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::StorageTarget = serde_json::from_slice(&rsp_body)?;
Ok(Response::Created201(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod delete {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) cache_name: String,
pub(crate) storage_target_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}/storageTargets/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name,
&this.storage_target_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
}
| {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.StorageCache/caches/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.cache_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2021-03-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::Cache = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
} |
external_repo.py | import logging
import os
import tempfile
import threading
from contextlib import contextmanager
from typing import Dict
from funcy import retry, wrap_with
from dvc.exceptions import (
FileMissingError,
NoOutputInExternalRepoError,
NoRemoteInExternalRepoError,
NotDvcRepoError,
OutputNotFoundError,
PathMissingError,
)
from dvc.repo import Repo
from dvc.utils import relpath
logger = logging.getLogger(__name__)
@contextmanager
def external_repo(
url, rev=None, for_write=False, cache_dir=None, cache_types=None, **kwargs
):
from dvc.config import NoRemoteError
from dvc.scm.git import Git
logger.debug("Creating external repo %s@%s", url, rev)
path = _cached_clone(url, rev, for_write=for_write)
# Local HEAD points to the tip of whatever branch we first cloned from
# (which may not be the default branch), use origin/HEAD here to get
# the tip of the default branch
rev = rev or "refs/remotes/origin/HEAD"
cache_config = {
"cache": {
"dir": cache_dir or _get_cache_dir(url),
"type": cache_types,
}
}
config = _get_remote_config(url) if os.path.isdir(url) else {}
config.update(cache_config)
def make_repo(path, **_kwargs):
_config = cache_config.copy()
if os.path.isdir(url):
rel = os.path.relpath(path, _kwargs["scm"].root_dir)
repo_path = os.path.join(url, rel)
_config.update(_get_remote_config(repo_path))
return Repo(path, config=_config, **_kwargs)
root_dir = path if for_write else os.path.realpath(path)
repo_kwargs = dict(
root_dir=root_dir,
url=url,
scm=None if for_write else Git(root_dir),
rev=None if for_write else rev,
config=config,
repo_factory=make_repo,
**kwargs,
)
if "subrepos" not in repo_kwargs:
repo_kwargs["subrepos"] = True
if "uninitialized" not in repo_kwargs:
repo_kwargs["uninitialized"] = True
repo = Repo(**repo_kwargs)
try:
yield repo
except NoRemoteError as exc:
raise NoRemoteInExternalRepoError(url) from exc
except OutputNotFoundError as exc:
if exc.repo is repo:
raise NoOutputInExternalRepoError(
exc.output, repo.root_dir, url
) from exc
raise
except FileMissingError as exc:
raise PathMissingError(exc.path, url) from exc
finally:
repo.close()
if for_write:
_remove(path)
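# A minimal usage sketch of the context manager above (the URL and rev are hypothetical):
#   with external_repo("https://github.com/org/project.git", rev="some-branch") as repo:
#       ...  # work with the temporary Repo instance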
CLONES: Dict[str, str] = {}
CACHE_DIRS: Dict[str, str] = {}
@wrap_with(threading.Lock())
def _get_cache_dir(url):
try:
cache_dir = CACHE_DIRS[url]
except KeyError:
cache_dir = CACHE_DIRS[url] = tempfile.mkdtemp("dvc-cache")
return cache_dir
def clean_repos():
# Outside code should not see cache while we are removing
paths = [path for path, _ in CLONES.values()] + list(CACHE_DIRS.values())
CLONES.clear()
CACHE_DIRS.clear()
for path in paths:
_remove(path)
def _get_remote_config(url):
try:
repo = Repo(url)
except NotDvcRepoError:
return {}
try:
name = repo.config["core"].get("remote")
if not name:
# Fill the empty upstream entry with a new remote pointing to the
# original repo's cache location.
name = "auto-generated-upstream"
return {
"core": {"remote": name},
"remote": {name: {"url": repo.odb.local.cache_dir}},
}
# Use original remote to make sure that we are using correct url,
# credential paths, etc if they are relative to the config location.
return {"remote": {name: repo.config["remote"][name]}}
finally:
repo.close()
def _cached_clone(url, rev, for_write=False):
"""Clone an external git repo to a temporary directory.
Returns the path to a local temporary directory with the specified
    revision checked out. If for_write is set, this directory will not be
    reused via the clone cache.
"""
from distutils.dir_util import copy_tree
# even if we have already cloned this repo, we may need to
# fetch/fast-forward to get specified rev
clone_path, shallow = _clone_default_branch(url, rev, for_write=for_write)
if not for_write and (url) in CLONES:
return CLONES[url][0]
# Copy to a new dir to keep the clone clean
repo_path = tempfile.mkdtemp("dvc-erepo")
logger.debug("erepo: making a copy of %s clone", url)
copy_tree(clone_path, repo_path)
# Check out the specified revision
if for_write:
_git_checkout(repo_path, rev)
else:
CLONES[url] = (repo_path, shallow)
return repo_path
@wrap_with(threading.Lock())
def _clone_default_branch(url, rev, for_write=False):
|
def _unshallow(git):
if git.gitpython.repo.head.is_detached:
# If this is a detached head (i.e. we shallow cloned a tag) switch to
# the default branch
origin_refs = git.gitpython.repo.remotes["origin"].refs
ref = origin_refs["HEAD"].reference
branch_name = ref.name.split("/")[-1]
branch = git.gitpython.repo.create_head(branch_name, ref)
branch.set_tracking_branch(ref)
branch.checkout()
git.pull(unshallow=True)
def _git_checkout(repo_path, rev):
from dvc.scm.git import Git
logger.debug("erepo: git checkout %s@%s", repo_path, rev)
git = Git(repo_path)
try:
git.checkout(rev)
finally:
git.close()
def _remove(path):
from dvc.utils.fs import remove
if os.name == "nt":
# git.exe may hang for a while not permitting to remove temp dir
os_retry = retry(5, errors=OSError, timeout=0.1)
try:
os_retry(remove)(path)
except PermissionError:
logger.warning(
"Failed to remove '%s'", relpath(path), exc_info=True
)
else:
remove(path)
| """Get or create a clean clone of the url.
    The clone is refreshed with git pull unless rev is a known SHA.
"""
from dvc.scm.git import Git
clone_path, shallow = CLONES.get(url, (None, False))
git = None
try:
if clone_path:
git = Git(clone_path)
            # Do not pull for known SHAs; branches and tags might move
if not Git.is_sha(rev) or not git.has_rev(rev):
if shallow:
# If we are missing a rev in a shallow clone, fallback to
# a full (unshallowed) clone. Since fetching specific rev
# SHAs is only available in certain git versions, if we
                    # need to reference multiple specific revs for a
# given repo URL it is easier/safer for us to work with
# full clones in this case.
logger.debug("erepo: unshallowing clone for '%s'", url)
_unshallow(git)
shallow = False
CLONES[url] = (clone_path, shallow)
else:
logger.debug("erepo: git pull '%s'", url)
git.pull()
else:
logger.debug("erepo: git clone '%s' to a temporary dir", url)
clone_path = tempfile.mkdtemp("dvc-clone")
if not for_write and rev and not Git.is_sha(rev):
# If rev is a tag or branch name try shallow clone first
from dvc.scm.base import CloneError
try:
git = Git.clone(url, clone_path, shallow_branch=rev)
shallow = True
logger.debug(
"erepo: using shallow clone for branch '%s'", rev
)
except CloneError:
pass
if not git:
git = Git.clone(url, clone_path)
shallow = False
CLONES[url] = (clone_path, shallow)
finally:
if git:
git.close()
return clone_path, shallow |
backup_script.py | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import datetime
import importlib
import shutil
import sys
import tempfile
from dateutil.parser import parse
from aiida.backends.utils import is_dbenv_loaded, load_dbenv, BACKEND_SQLA, BACKEND_DJANGO
from aiida.backends.settings import BACKEND
from aiida.backends.testbase import AiidaTestCase
from aiida.common import utils
from aiida.common.additions.backup_script import backup_setup
from aiida.orm.node import Node
import aiida.utils.json as json
if not is_dbenv_loaded():
load_dbenv()
class TestBackupScriptUnit(AiidaTestCase):
_json_test_input_1 = '{"backup_length_threshold": 2, "periodicity": 2,' + \
' "oldest_object_backedup": "2014-07-18 13:54:53.688484+00:00", ' + \
'"end_date_of_backup": null, "days_to_backup": null, "backup_dir": ' +\
'"/scratch/aiida_user/backupScriptDest"}'
_json_test_input_2 = '{"backup_length_threshold": 2, "periodicity": 2, ' +\
'"oldest_object_backedup": "2014-07-18 13:54:53.688484+00:00", ' + \
'"end_date_of_backup": null, "days_to_backup": null, "backup_dir": ' +\
'"/scratch/aiida_user/backupScriptDest"}'
_json_test_input_3 = '{"backup_length_threshold": 2, "periodicity": 2, ' +\
'"oldest_object_backedup": "2014-07-18 13:54:53.688484+00:00", ' + \
'"end_date_of_backup": null, "days_to_backup": 2, "backup_dir": ' + \
'"/scratch/aiida_user/backupScriptDest"}'
_json_test_input_4 = '{"backup_length_threshold": 2, "periodicity": 2, ' +\
'"oldest_object_backedup": "2014-07-18 13:54:53.688484+00:00", ' + \
'"end_date_of_backup": "2014-07-22 14:54:53.688484+00:00", ' + \
'"days_to_backup": null, "backup_dir": ' + \
'"/scratch/aiida_user/backupScriptDest"}'
_json_test_input_5 = '{"backup_length_threshold": 2, "periodicity": 2, ' +\
'"oldest_object_backedup": "2014-07-18 13:54:53.688484+00:00", ' + \
'"end_date_of_backup": "2014-07-22 14:54:53.688484+00:00", ' + \
'"days_to_backup": 2, "backup_dir": "/scratch/aiida_user/backup"}'
_json_test_input_6 = '{"backup_length_threshold": 2, "periodicity": 2, ' +\
'"oldest_object_backedup": "2014-07-18 13:54:53.688484", ' + \
'"end_date_of_backup": "2014-07-22 14:54:53.688484", ' + \
'"days_to_backup": null, ' \
'"backup_dir": "/scratch/./aiida_user////backup//"}'
def setUp(self):
super(TestBackupScriptUnit, self).setUp()
if not is_dbenv_loaded():
load_dbenv()
if BACKEND == BACKEND_SQLA:
from aiida.common.additions.backup_script.backup_sqlalchemy import Backup
elif BACKEND == BACKEND_DJANGO:
from aiida.common.additions.backup_script.backup_django import Backup
else:
self.skipTest("Unknown backend")
self._backup_setup_inst = Backup("", 2)
def tearDown(self):
super(TestBackupScriptUnit, self).tearDown()
self._backup_setup_inst = None
def test_loading_basic_params_from_file(self):
"""
This method tests the correct loading of the basic _backup_setup_inst
parameters from a JSON string.
"""
backup_variables = json.loads(self._json_test_input_1)
self._backup_setup_inst._ignore_backup_dir_existence_check = True
self._backup_setup_inst._read_backup_info_from_dict(backup_variables)
self.assertEqual(
self._backup_setup_inst._oldest_object_bk,
parse("2014-07-18 13:54:53.688484+00:00"),
"Last _backup_setup_inst start date is not parsed correctly")
# The destination directory of the _backup_setup_inst
self.assertEqual(
self._backup_setup_inst._backup_dir,
"/scratch/aiida_user/backupScriptDest",
"_backup_setup_inst destination directory not parsed correctly")
self.assertEqual(
self._backup_setup_inst._backup_length_threshold,
datetime.timedelta(hours=2),
"_backup_length_threshold not parsed correctly")
self.assertEqual(
self._backup_setup_inst._periodicity,
2,
"_periodicity not parsed correctly")
def test_loading_backup_time_params_from_file_1(self):
"""
This method tests that the _backup_setup_inst limits are correctly
loaded from the JSON string and are correctly set.
In the parsed JSON string, no _backup_setup_inst end limits are set
"""
backup_variables = json.loads(self._json_test_input_2)
self._backup_setup_inst._ignore_backup_dir_existence_check = True
self._backup_setup_inst._read_backup_info_from_dict(backup_variables)
self.assertEqual(
self._backup_setup_inst._days_to_backup,
None,
"_days_to_backup should be None/null but it is not")
self.assertEqual(
self._backup_setup_inst._end_date_of_backup,
None,
"_end_date_of_backup should be None/null but it is not")
self.assertEqual(
self._backup_setup_inst._internal_end_date_of_backup,
None,
"_internal_end_date_of_backup should be None/null but it is not")
def test_loading_backup_time_params_from_file_2(self):
"""
This method tests that the _backup_setup_inst limits are correctly
loaded from the JSON string and are correctly set.
In the parsed JSON string, only the daysToBackup limit is set.
"""
backup_variables = json.loads(self._json_test_input_3)
self._backup_setup_inst._ignore_backup_dir_existence_check = True
self._backup_setup_inst._read_backup_info_from_dict(backup_variables)
self.assertEqual(
self._backup_setup_inst._days_to_backup,
2,
"_days_to_backup should be 2 but it is not")
self.assertEqual(
self._backup_setup_inst._end_date_of_backup,
None,
"_end_date_of_backup should be None/null but it is not")
self.assertEqual(
self._backup_setup_inst._internal_end_date_of_backup,
parse("2014-07-20 13:54:53.688484+00:00"),
"_internal_end_date_of_backup is not the expected one")
def test_loading_backup_time_params_from_file_3(self):
"""
This method tests that the _backup_setup_inst limits are correctly
loaded from the JSON string and are correctly set.
In the parsed JSON string, only the endDateOfBackup limit is set.
"""
backup_variables = json.loads(self._json_test_input_4)
self._backup_setup_inst._ignore_backup_dir_existence_check = True
self._backup_setup_inst._read_backup_info_from_dict(backup_variables)
self.assertEqual(
self._backup_setup_inst._days_to_backup,
None,
"_days_to_backup should be None/null but it is not")
self.assertEqual(
self._backup_setup_inst._end_date_of_backup,
parse("2014-07-22 14:54:53.688484+00:00"),
"_end_date_of_backup should be None/null but it is not")
self.assertEqual(
self._backup_setup_inst._internal_end_date_of_backup,
parse("2014-07-22 14:54:53.688484+00:00"),
"_internal_end_date_of_backup is not the expected one")
def test_loading_backup_time_params_from_file_4(self):
|
def check_full_deserialization_serialization(self, input_string, backup_inst):
input_variables = json.loads(input_string)
backup_inst._ignore_backup_dir_existence_check = True
backup_inst._read_backup_info_from_dict(input_variables)
target_variables = backup_inst._dictionarize_backup_info()
self.assertEqual(input_variables, target_variables,
"The test string {} did not succeed".format(
input_string) +
" the serialization deserialization test.\n" +
"Input variables: {}\n".format(input_variables) +
"Output variables: {}\n".format(target_variables))
def test_full_deserialization_serialization_1(self):
"""
This method tests the correct deserialization / serialization of the
variables that should be stored in a file.
"""
input_string = self._json_test_input_1
backup_inst = self._backup_setup_inst
self.check_full_deserialization_serialization(input_string, backup_inst)
def test_full_deserialization_serialization_2(self):
"""
This method tests the correct deserialization / serialization of the
variables that should be stored in a file.
"""
input_string = self._json_test_input_2
backup_inst = self._backup_setup_inst
self.check_full_deserialization_serialization(input_string, backup_inst)
def test_full_deserialization_serialization_3(self):
"""
This method tests the correct deserialization / serialization of the
variables that should be stored in a file.
"""
input_string = self._json_test_input_3
backup_inst = self._backup_setup_inst
self.check_full_deserialization_serialization(input_string, backup_inst)
def test_full_deserialization_serialization_4(self):
"""
This method tests the correct deserialization / serialization of the
variables that should be stored in a file.
"""
input_string = self._json_test_input_4
backup_inst = self._backup_setup_inst
self.check_full_deserialization_serialization(input_string, backup_inst)
def test_timezone_addition_and_dir_correction(self):
"""
This method tests if the timezone is added correctly to timestamps
that don't have a timezone. Moreover, it checks if the given directory
paths are normalized as expected.
"""
backup_variables = json.loads(self._json_test_input_6)
self._backup_setup_inst._ignore_backup_dir_existence_check = True
self._backup_setup_inst._read_backup_info_from_dict(backup_variables)
self.assertIsNotNone(
self._backup_setup_inst._oldest_object_bk.tzinfo,
"Timezone info should not be none (timestamp: {})."
.format(self._backup_setup_inst._oldest_object_bk))
self.assertIsNotNone(
self._backup_setup_inst._end_date_of_backup.tzinfo,
"Timezone info should not be none (timestamp: {})."
.format(self._backup_setup_inst._end_date_of_backup))
self.assertIsNotNone(
self._backup_setup_inst._internal_end_date_of_backup.tzinfo,
"Timezone info should not be none (timestamp: {})."
.format(self._backup_setup_inst._internal_end_date_of_backup))
# The destination directory of the _backup_setup_inst
self.assertEqual(
self._backup_setup_inst._backup_dir,
"/scratch/aiida_user/backup",
"_backup_setup_inst destination directory is "
"not normalized as expected.")
class TestBackupScriptIntegration(AiidaTestCase):
_aiida_rel_path = ".aiida"
_backup_rel_path = "backup"
_repo_rel_path = "repository"
_bs_instance = backup_setup.BackupSetup()
def test_integration(self):
from aiida.utils.capturing import Capturing
# Fill in the repository with data
self.fill_repo()
try:
# Create a temp folder where the backup files will be placed
# and the backup will be stored
temp_folder = tempfile.mkdtemp()
# Capture the sysout of the following command
with Capturing():
# Create the backup scripts
backup_full_path = self.create_backup_scripts(temp_folder)
# Put the backup folder in the path
sys.path.append(backup_full_path)
# Import the backup script - this action will also run it
# It is assumed that the backup script ends with .py
importlib.import_module(self._bs_instance._script_filename[:-3])
# Check the backup
from aiida import settings
from filecmp import dircmp
import os
from aiida.common.utils import are_dir_trees_equal
source_dir = os.path.join(settings.REPOSITORY_PATH,
self._repo_rel_path)
dest_dir = os.path.join(backup_full_path,
self._bs_instance._file_backup_folder_rel,
self._repo_rel_path)
res, msg = are_dir_trees_equal(source_dir, dest_dir)
self.assertTrue(res, "The backed-up repository has differences to the original one. " + str(msg)
+ ". If the test fails, report it in issue #2134.")
finally:
shutil.rmtree(temp_folder, ignore_errors=True)
def fill_repo(self):
from aiida.orm import JobCalculation, CalculationFactory, Data, DataFactory
extra_name = self.__class__.__name__ + "/test_with_subclasses"
calc_params = {
'computer': self.computer,
'resources': {'num_machines': 1,
'num_mpiprocs_per_machine': 1}
}
TemplateReplacerCalc = CalculationFactory('simpleplugins.templatereplacer')
ParameterData = DataFactory('parameter')
a1 = JobCalculation(**calc_params).store()
# To query only these nodes later
a1.set_extra(extra_name, True)
a2 = TemplateReplacerCalc(**calc_params).store()
# To query only these nodes later
a2.set_extra(extra_name, True)
a3 = Data().store()
a3.set_extra(extra_name, True)
a4 = ParameterData(dict={'a': 'b'}).store()
a4.set_extra(extra_name, True)
a5 = Node().store()
a5.set_extra(extra_name, True)
# I don't set the extras, just to be sure that the filtering works
        # The filtering is needed because other tests will put stuff in the DB
a6 = JobCalculation(**calc_params)
a6.store()
a7 = Node()
a7.store()
def create_backup_scripts(self, tmp_folder):
backup_full_path = "{}/{}/{}/".format(tmp_folder, self._aiida_rel_path,
self._backup_rel_path)
# The predefined answers for the setup script
ac = utils.ArrayCounter()
answers = [backup_full_path, # the backup folder path
"", # should the folder be created?
"", # destination folder of the backup
"", # should the folder be created?
"n", # print config explanation?
"", # configure the backup conf file now?
"", # start date of backup?
"", # is it correct?
"", # days to backup?
"", # is it correct?
"", # end date of backup
"", # is it correct?
"1", # periodicity
"", # is it correct?
"0", # threshold?
""] # is it correct?
utils.input = lambda _: answers[ac.array_counter()]
# Run the setup script
self._bs_instance.run()
return backup_full_path
| """
This method tests that the _backup_setup_inst limits are correctly
loaded from the JSON string and are correctly set.
        In the parsed JSON string, both the endDateOfBackup and daysToBackup
        limits are set, which should lead to an exception.
"""
from aiida.common.additions.backup_script.backup_base import BackupError
backup_variables = json.loads(self._json_test_input_5)
self._backup_setup_inst._ignore_backup_dir_existence_check = True
        # An exception should be raised because endDateOfBackup and
        # daysToBackup have been defined at the same time.
with self.assertRaises(BackupError):
self._backup_setup_inst._read_backup_info_from_dict(backup_variables) |
move_zeros.py | """
Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the
non-zero elements.
Example:
Input: [0,1,0,3,12]
Output: [1,3,12,0,0]
Note:
You must do this in-place without making a copy of the array.
Minimize the total number of operations.
"""
from unittest import TestCase
class Solution:
@staticmethod
def get_next(nums, start, zero=True):
|
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
z = self.get_next(nums, 0, zero=True)
n = self.get_next(nums, 0, zero=False)
while n < len(nums):
if n > z and nums[n] != 0:
nums[z], nums[n] = nums[n], nums[z]
z = self.get_next(nums, z, zero=True)
n = self.get_next(nums, n, zero=False)
continue
else:
n += 1
class TestSolution(TestCase):
sol = Solution()
def test_0(self):
nums = [0, 1, 2, 3, 4]
self.sol.moveZeroes(nums)
print("result: {}".format(nums))
self.assertEqual([1, 2, 3, 4, 0], nums)
def test_1(self):
nums = [0, 1, 0, 2, 0, 3, 0, 1]
self.sol.moveZeroes(nums)
print("result: {}".format(nums))
self.assertEqual([1, 2, 3, 1, 0, 0, 0, 0], nums)
def test_3(self):
nums = [0,1,0,3,12]
self.sol.moveZeroes(nums)
print("result: {}".format(nums))
self.assertEqual([1, 3, 12, 0, 0], nums)
def test_4(self):
nums = [1]
self.sol.moveZeroes(nums)
print("result: {}".format(nums))
self.assertEqual([1], nums)
def test_5(self):
nums = [1, -2, 0, 0, 3, 0, 4, 0]
self.sol.moveZeroes(nums)
print("result: {}".format(nums))
self.assertEqual([1, -2, 3, 4, 0, 0, 0, 0], nums)
def test_6(self):
nums = [-959151711,623836953,209446690,-1950418142,1339915067,-733626417,481171539,-2125997010,-1225423476,1462109565,147434687,-1800073781,-1431212205,-450443973,50097298,753533734,-747189404,-2070885638,0,-1484353894,-340296594,-2133744570,619639811,-1626162038,669689561,0,112220218,502447212,-787793179,0,-726846372,-1611013491,204107194,1605165582,-566891128,2082852116,0,532995238,-1502590712,0,2136989777,-2031153343,371398938,-1907397429,342796391,609166045,-2007448660,-1096076344,-323570318,0,-2082980371,2129956379,-243553361,-1549960929,1502383415,0,-1394618779,694799815,78595689,-1439173023,-1416578800,685225786,-333502212,-1181308536,-380569313,772035354,0,-915266376,663709718,1443496021,-777017729,-883300731,-387828385,1907473488,-725483724,-972961871,-1255712537,383120918,1383877998,1722751914,0,-1156050682,1952527902,-560244497,1304305692,1173974542,-1313227247,-201476579,-298899493,-1828496581,-1724396350,1933643204,1531804925,1728655262,-955565449,0,-69843702,-461760848,268336768,1446130876]
self.sol.moveZeroes(nums)
print("result: {}".format(nums))
self.assertEqual([-959151711,623836953,209446690,-1950418142,1339915067,-733626417,481171539,-2125997010,-1225423476,1462109565,147434687,-1800073781,-1431212205,-450443973,50097298,753533734,-747189404,-2070885638,-1484353894,-340296594,-2133744570,619639811,-1626162038,669689561,112220218,502447212,-787793179,-726846372,-1611013491,204107194,1605165582,-566891128,2082852116,532995238,-1502590712,2136989777,-2031153343,371398938,-1907397429,342796391,609166045,-2007448660,-1096076344,-323570318,-2082980371,2129956379,-243553361,-1549960929,1502383415,-1394618779,694799815,78595689,-1439173023,-1416578800,685225786,-333502212,-1181308536,-380569313,772035354,-915266376,663709718,1443496021,-777017729,-883300731,-387828385,1907473488,-725483724,-972961871,-1255712537,383120918,1383877998,1722751914,-1156050682,1952527902,-560244497,1304305692,1173974542,-1313227247,-201476579,-298899493,-1828496581,-1724396350,1933643204,1531804925,1728655262,-955565449,-69843702,-461760848,268336768,1446130876,0,0,0,0,0,0,0,0,0,0], nums)
| for idx in range(start, len(nums)):
if zero and nums[idx] == 0:
return idx
            if not zero and nums[idx] != 0:
return idx
return len(nums) |
rename_all.py | """
rename_all.py
Usage:
$ poetry install
$ poetry run python rename_all.py
"""
from typing import Optional
from serde import serde
from serde.json import from_json, to_json
@serde(rename_all='pascalcase')
class Foo:
name: str
no: Optional[int] = None
def | ():
f = Foo('Pikachu')
print(f"Into Json: {to_json(f)}")
s = '{"Name": "Pikachu", "No": 25}'
print(f"From Json: {from_json(Foo, s)}")
if __name__ == '__main__':
main()
| main |
dataset_mapper.py | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import torch
from fvcore.common.file_io import PathManager
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from .structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData
class DatasetMapper:
"""
A customized version of `detectron2.data.DatasetMapper`
"""
def __init__(self, cfg, is_train=True):
self.tfm_gens = utils.build_transform_gen(cfg, is_train)
# fmt: off
self.img_format = cfg.INPUT.FORMAT
self.mask_on = cfg.MODEL.MASK_ON
self.keypoint_on = cfg.MODEL.KEYPOINT_ON
self.densepose_on = cfg.MODEL.DENSEPOSE_ON
assert not cfg.MODEL.LOAD_PROPOSALS, "not supported yet"
# fmt: on
if self.keypoint_on and is_train:
# Flip only makes sense in training
self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
else:
self.keypoint_hflip_indices = None
if self.densepose_on:
densepose_transform_srcs = [
MetadataCatalog.get(ds).densepose_transform_src
for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST
]
assert len(densepose_transform_srcs) > 0
# TODO: check that DensePose transformation data is the same for
# all the data. Otherwise one would have to pass DB ID with
# each entry to select proper transformation data. For now, since
# all DensePose annotated data uses the same data semantics, we
# omit this check.
densepose_transform_data_fpath = PathManager.get_local_path(densepose_transform_srcs[0])
self.densepose_transform_data = DensePoseTransformData.load(
densepose_transform_data_fpath
)
self.is_train = is_train
def __call__(self, dataset_dict):
|
def _transform_densepose(self, annotation, transforms):
if not self.densepose_on:
return annotation
# Handle densepose annotations
is_valid, reason_not_valid = DensePoseDataRelative.validate_annotation(annotation)
if is_valid:
densepose_data = DensePoseDataRelative(annotation, cleanup=True)
densepose_data.apply_transform(transforms, self.densepose_transform_data)
annotation["densepose"] = densepose_data
else:
# logger = logging.getLogger(__name__)
# logger.debug("Could not load DensePose annotation: {}".format(reason_not_valid))
DensePoseDataRelative.cleanup_annotation(annotation)
# NOTE: annotations for certain instances may be unavailable.
            # 'None' is accepted by the DensePoseList data structure.
annotation["densepose"] = None
return annotation
| """
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
image_shape = image.shape[:2] # h, w
dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
if not self.is_train:
dataset_dict.pop("annotations", None)
return dataset_dict
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
# USER: Don't call transpose_densepose if you don't need
annos = [
self._transform_densepose(
utils.transform_instance_annotations(
obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
),
transforms,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(annos, image_shape)
if len(annos) and "densepose" in annos[0]:
gt_densepose = [obj["densepose"] for obj in annos]
instances.gt_densepose = DensePoseList(gt_densepose, instances.gt_boxes, image_shape)
dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
return dataset_dict |
sip_from.go | // Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
// Copyright (c) 2006-2015 Sippy Software, Inc. All rights reserved.
// Copyright (c) 2015 Andrii Pylypenko. All rights reserved.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package sippy_header
import (
"sippy/net"
"sippy/conf"
)
type SipFrom struct {
compactName
*sipAddressHF
}
var _sip_from_name compactName = newCompactName("From", "f")
func CreateSipFrom(body string) []SipHeader {
addresses := createSipAddressHFs(body)
rval := make([]SipHeader, len(addresses))
for i, address := range addresses {
rval[i] = &SipFrom{
compactName : _sip_from_name,
sipAddressHF : address,
}
}
return rval
}
func NewSipFrom(address *SipAddress, config sippy_conf.Config) *SipFrom {
if address == nil |
return &SipFrom{
compactName : _sip_from_name,
sipAddressHF : newSipAddressHF(address),
}
}
func (self *SipFrom) String() string {
return self.LocalStr(nil, false)
}
func (self *SipFrom) LocalStr(hostport *sippy_net.HostPort, compact bool) string {
if compact {
return self.CompactName() + ": " + self.LocalStringBody(hostport)
}
return self.Name() + ": " + self.LocalStringBody(hostport)
}
func (self *SipFrom) GetCopy() *SipFrom {
return &SipFrom{
compactName : _sip_from_name,
sipAddressHF : self.sipAddressHF.getCopy(),
}
}
func (self *SipFrom) GetCopyAsIface() SipHeader {
return self.GetCopy()
}
| {
address = NewSipAddress("Anonymous", NewSipURL("" /* username */,
config.GetMyAddress(),
config.GetMyPort(),
false))
} |
uxx.rs | #![no_main]
#![no_std]
use core::sync::atomic::{AtomicU16, AtomicU32, AtomicU8, AtomicUsize, Ordering};
use common::W;
use cortex_m::interrupt;
use cortex_m_rt::{entry, exception};
use ufmt::uwrite;
static A: AtomicU8 = AtomicU8::new(0);
static B: AtomicU16 = AtomicU16::new(0);
static C: AtomicU32 = AtomicU32::new(0);
static D: AtomicUsize = AtomicUsize::new(0);
static mut E: u64 = 0;
static mut F: u128 = 0;
#[entry]
fn main() -> ! {
loop {
A.fetch_add(1, Ordering::Relaxed);
B.fetch_add(1, Ordering::Relaxed);
C.fetch_add(1, Ordering::Relaxed);
D.fetch_add(1, Ordering::Relaxed);
interrupt::free(|_| unsafe {
E += 1;
F += 1;
})
}
}
#[exception]
fn PendSV() | {
uwrite!(&mut W, "{}", A.load(Ordering::Relaxed)).unwrap();
uwrite!(&mut W, "{}", B.load(Ordering::Relaxed)).unwrap();
uwrite!(&mut W, "{}", C.load(Ordering::Relaxed)).unwrap();
uwrite!(&mut W, "{}", D.load(Ordering::Relaxed)).unwrap();
unsafe {
uwrite!(&mut W, "{}", E).unwrap();
uwrite!(&mut W, "{}", F).unwrap();
}
} |
|
containerRegistryManagementClient.ts | /*
* Copyright (c) Microsoft Corporation.
* Licensed under the MIT License.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
* Changes may cause incorrect behavior and will be lost if the code is regenerated.
*/
import * as coreClient from "@azure/core-client";
import * as coreAuth from "@azure/core-auth";
import {
ConnectedRegistriesImpl,
ExportPipelinesImpl,
RegistriesImpl,
ImportPipelinesImpl,
OperationsImpl,
PipelineRunsImpl,
PrivateEndpointConnectionsImpl,
ReplicationsImpl,
ScopeMapsImpl,
TokensImpl,
WebhooksImpl,
AgentPoolsImpl,
RunsImpl,
TaskRunsImpl,
TasksImpl
} from "./operations";
import {
ConnectedRegistries,
ExportPipelines,
Registries,
ImportPipelines,
Operations,
PipelineRuns,
PrivateEndpointConnections,
Replications,
ScopeMaps,
Tokens,
Webhooks,
AgentPools,
Runs,
TaskRuns,
Tasks
} from "./operationsInterfaces";
import { ContainerRegistryManagementClientOptionalParams } from "./models";
export class | extends coreClient.ServiceClient {
$host: string;
subscriptionId: string;
/**
* Initializes a new instance of the ContainerRegistryManagementClient class.
* @param credentials Subscription credentials which uniquely identify client subscription.
* @param subscriptionId The Microsoft Azure subscription ID.
* @param options The parameter options
*/
constructor(
credentials: coreAuth.TokenCredential,
subscriptionId: string,
options?: ContainerRegistryManagementClientOptionalParams
) {
if (credentials === undefined) {
throw new Error("'credentials' cannot be null");
}
if (subscriptionId === undefined) {
throw new Error("'subscriptionId' cannot be null");
}
// Initializing default values for options
if (!options) {
options = {};
}
const defaults: ContainerRegistryManagementClientOptionalParams = {
requestContentType: "application/json; charset=utf-8",
credential: credentials
};
const packageDetails = `azsdk-js-arm-containerregistry/10.1.0-beta.2`;
const userAgentPrefix =
options.userAgentOptions && options.userAgentOptions.userAgentPrefix
? `${options.userAgentOptions.userAgentPrefix} ${packageDetails}`
: `${packageDetails}`;
if (!options.credentialScopes) {
options.credentialScopes = ["https://management.azure.com/.default"];
}
const optionsWithDefaults = {
...defaults,
...options,
userAgentOptions: {
userAgentPrefix
},
baseUri: options.endpoint || "https://management.azure.com"
};
super(optionsWithDefaults);
// Parameter assignments
this.subscriptionId = subscriptionId;
// Assigning values to Constant parameters
this.$host = options.$host || "https://management.azure.com";
this.connectedRegistries = new ConnectedRegistriesImpl(this);
this.exportPipelines = new ExportPipelinesImpl(this);
this.registries = new RegistriesImpl(this);
this.importPipelines = new ImportPipelinesImpl(this);
this.operations = new OperationsImpl(this);
this.pipelineRuns = new PipelineRunsImpl(this);
this.privateEndpointConnections = new PrivateEndpointConnectionsImpl(this);
this.replications = new ReplicationsImpl(this);
this.scopeMaps = new ScopeMapsImpl(this);
this.tokens = new TokensImpl(this);
this.webhooks = new WebhooksImpl(this);
this.agentPools = new AgentPoolsImpl(this);
this.runs = new RunsImpl(this);
this.taskRuns = new TaskRunsImpl(this);
this.tasks = new TasksImpl(this);
}
connectedRegistries: ConnectedRegistries;
exportPipelines: ExportPipelines;
registries: Registries;
importPipelines: ImportPipelines;
operations: Operations;
pipelineRuns: PipelineRuns;
privateEndpointConnections: PrivateEndpointConnections;
replications: Replications;
scopeMaps: ScopeMaps;
tokens: Tokens;
webhooks: Webhooks;
agentPools: AgentPools;
runs: Runs;
taskRuns: TaskRuns;
tasks: Tasks;
}
| ContainerRegistryManagementClient |
flomaster.py | from helpers import *
from plots import *
from col_type_detector import *
# from configs import *
import warnings
def generate_flomaster_plot(df, x="None", y=[], group_by=None, plot_type=None, x_axis=None, y_axis=None, title=None):
"""
    Generates an interactive plot for the given dataframe and columns.
Args:
df (pd.DataFrame)
x (str): name of the column to use as x_axis
y (str or list): either one column or list of columns to plot as y axis
group_by (str): column by which to group data (default is None)
        plot_type (str): possible values vary depending on the input data; the options are:
ONE_NUMERIC = ['Histogram', 'Distplot']
ONE_CATEOGIRCAL = ['Donut', 'Pie', 'Histogram']
ONE_TEXT = ['Wordcloud']
TWO_NUMERIC = ["Scatter", "Scatter plot with margins", "2D density plot", "Distplot", "Histogram", "Basic Stats"]
TWO_NUMERIC_SORTED = ['Connected Scatter', "Area plot", "Line plot"]
ONE_CATEOGIRCAL_ONE_NUMERICAL = ['Box', "Violin", "Basic Stats"]
TWO_CATEGORICAL = ['Cross tab', "Stacked bar"]
ONE_DATETIME_ONE_NUMERIC = ['Connected Scatter']
        x_axis (str): defaults to the x column's name
        y_axis (str): defaults to y, or to the first element of y if y is a list
title (str): defaults to f"{x_axis} vs {y_axis}"
Note:
        Some illogical results might occur if column_type_detector classifies some
        columns incorrectly; also note that this package is at a very early stage of development.
Raises:
ValueError: if plot_type is not from allowed list
Returns:
plotly figure object
"""
if type(y) == str:
y = [y]
data_types = get_column_types(df, num_unique_categories=2)
if x_axis is None:
x_axis = x
if y != [] and y_axis is None:
y_axis = y[0]
if title is None:
title = f"{x_axis} vs {y_axis}"
x_dtype = get_data_type_for_given_feature(data_types, x)
y_dtype = get_data_type_for_given_feature(data_types, y[0])
# print(x)
# print(y)
# print(x_dtype)
# print(y_dtype)
# one feature
if x != "None" and y[0] == 'None':
if x_dtype == 'numeric': # 1
possible_graphs = ONE_NUMERIC
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = one_numeric(df, x, group_by, plot_type)
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
if x_dtype == 'categorical': # 2
possible_graphs = ONE_CATEOGIRCAL
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = one_categoric(df, x, group_by, plot_type)
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
if x_dtype == 'texts': # 3
possible_graphs = ONE_TEXT
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = one_textual(df, x)
return fig
# two features
if x != "None" and y[0] != 'None':
# two numeric
if x_dtype == "numeric" and y_dtype == 'numeric': # 4
global TWO_NUMERIC
if df[x].to_list() == sorted(df[x].to_list()):
TWO_NUMERIC += TWO_NUMERIC_SORTED
possible_graphs = TWO_NUMERIC
if len(df)>2000 and plot_type in ["Histogram", "Scatter"]:
warnings.warn('**Data has too many rows, we suggest plotting \
with one of the following: "Scatter plot with margins", "2D density plot", "Distplot"**')
if len(df)<2000 and plot_type not in ["Histogram", "Scatter", "Basic Stats"]:
|
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = two_numeric(df, x, y[0], group_by, plot_type)
if plot_type in ["Basic Stats",'Histogram']:
if y_axis == y[0]:
y_axis = ''
if x_axis == x:
x_axis = ''
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
#one numeric one categoric # 5
if x_dtype == "categorical" and y_dtype == 'numeric':
possible_graphs = ONE_CATEOGIRCAL_ONE_NUMERICAL
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = one_numeric_one_categorical(df, x, y, group_by, plot_type)
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
# two categoricals
if x_dtype == "categorical" and y_dtype == 'categorical':
possible_graphs = TWO_CATEGORICAL
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
if plot_type == 'Cross tab':
fig = two_categorical(df, x, y[0], plot_type)
elif plot_type == 'Stacked bar':
fig = two_categorical(df, x, y[0], plot_type)
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
# one datetime one numeric
if x_dtype == "datetime" and y_dtype == 'numeric':
global ONE_DATETIME_ONE_NUMERIC
if check_list_in_list(list(df.columns), ['Date', "Open", "High", "Low", "Close"]):
ONE_DATETIME_ONE_NUMERIC += ["Stock price"]
possible_graphs = ONE_DATETIME_ONE_NUMERIC
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = one_datetime_one_numeric(df, x, y, group_by,plot_type)
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
return "Something went wrong, contact team Flomaster"
| warnings.warn('**Data has few rows, we suggest plotting \
with one of the following: "Histogram", "Scatter"**') |
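# Illustrative usage sketch (added for clarity; not part of the original flomaster sources).
# It assumes a pandas DataFrame with two hypothetical numeric columns, "age" and "income";
# the column names and sample values are invented, and the plot type is one of the
# TWO_NUMERIC options listed in the docstring above.
import pandas as pd
example_df = pd.DataFrame({"age": [23, 35, 41, 52], "income": [40, 55, 61, 80]})
example_fig = generate_flomaster_plot(example_df, x="age", y=["income"], plot_type="Scatter", title="age vs income")
example_fig.show()  # the function returns a plotly figure object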
models.rs | #![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyTrackedResourcesQueryResults {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PolicyTrackedResource>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyTrackedResource {
#[serde(rename = "trackedResourceId", default, skip_serializing_if = "Option::is_none")]
pub tracked_resource_id: Option<String>,
#[serde(rename = "policyDetails", default, skip_serializing_if = "Option::is_none")]
pub policy_details: Option<PolicyDetails>,
#[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
pub created_by: Option<TrackedResourceModificationDetails>,
#[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")]
pub last_modified_by: Option<TrackedResourceModificationDetails>,
#[serde(rename = "lastUpdateUtc", default, skip_serializing_if = "Option::is_none")]
pub last_update_utc: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct | {
#[serde(rename = "policyDetails", default, skip_serializing_if = "Option::is_none")]
pub policy_details: Option<PolicyDetails>,
#[serde(rename = "deploymentId", default, skip_serializing_if = "Option::is_none")]
pub deployment_id: Option<String>,
#[serde(rename = "deploymentTime", default, skip_serializing_if = "Option::is_none")]
pub deployment_time: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyDetails {
#[serde(rename = "policyDefinitionId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_id: Option<String>,
#[serde(rename = "policyAssignmentId", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_id: Option<String>,
#[serde(rename = "policyAssignmentDisplayName", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_display_name: Option<String>,
#[serde(rename = "policyAssignmentScope", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_scope: Option<String>,
#[serde(rename = "policySetDefinitionId", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_id: Option<String>,
#[serde(rename = "policyDefinitionReferenceId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_reference_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryFailure {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<query_failure::Error>,
}
pub mod query_failure {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Error {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RemediationListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Remediation>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RemediationDeploymentsListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<RemediationDeployment>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RemediationDeployment {
#[serde(rename = "remediatedResourceId", default, skip_serializing_if = "Option::is_none")]
pub remediated_resource_id: Option<String>,
#[serde(rename = "deploymentId", default, skip_serializing_if = "Option::is_none")]
pub deployment_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<String>,
#[serde(rename = "resourceLocation", default, skip_serializing_if = "Option::is_none")]
pub resource_location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorDefinition>,
#[serde(rename = "createdOn", default, skip_serializing_if = "Option::is_none")]
pub created_on: Option<String>,
#[serde(rename = "lastUpdatedOn", default, skip_serializing_if = "Option::is_none")]
pub last_updated_on: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Remediation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<RemediationProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RemediationProperties {
#[serde(rename = "policyAssignmentId", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_id: Option<String>,
#[serde(rename = "policyDefinitionReferenceId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_reference_id: Option<String>,
#[serde(rename = "resourceDiscoveryMode", default, skip_serializing_if = "Option::is_none")]
pub resource_discovery_mode: Option<remediation_properties::ResourceDiscoveryMode>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "createdOn", default, skip_serializing_if = "Option::is_none")]
pub created_on: Option<String>,
#[serde(rename = "lastUpdatedOn", default, skip_serializing_if = "Option::is_none")]
pub last_updated_on: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub filters: Option<RemediationFilters>,
#[serde(rename = "deploymentStatus", default, skip_serializing_if = "Option::is_none")]
pub deployment_status: Option<RemediationDeploymentSummary>,
}
pub mod remediation_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ResourceDiscoveryMode {
ExistingNonCompliant,
ReEvaluateCompliance,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RemediationFilters {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub locations: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RemediationDeploymentSummary {
#[serde(rename = "totalDeployments", default, skip_serializing_if = "Option::is_none")]
pub total_deployments: Option<i64>,
#[serde(rename = "successfulDeployments", default, skip_serializing_if = "Option::is_none")]
pub successful_deployments: Option<i64>,
#[serde(rename = "failedDeployments", default, skip_serializing_if = "Option::is_none")]
pub failed_deployments: Option<i64>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorDefinition>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDefinition {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub details: Vec<ErrorDefinition>,
#[serde(rename = "additionalInfo", default, skip_serializing_if = "Vec::is_empty")]
pub additional_info: Vec<TypedErrorInfo>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TypedErrorInfo {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub info: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyEventsQueryResults {
#[serde(rename = "@odata.context", default, skip_serializing_if = "Option::is_none")]
pub odata_context: Option<String>,
#[serde(rename = "@odata.count", default, skip_serializing_if = "Option::is_none")]
pub odata_count: Option<i32>,
#[serde(rename = "@odata.nextLink", default, skip_serializing_if = "Option::is_none")]
pub odata_next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PolicyEvent>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyEvent {
#[serde(rename = "@odata.id", default, skip_serializing_if = "Option::is_none")]
pub odata_id: Option<String>,
#[serde(rename = "@odata.context", default, skip_serializing_if = "Option::is_none")]
pub odata_context: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub timestamp: Option<String>,
#[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
pub resource_id: Option<String>,
#[serde(rename = "policyAssignmentId", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_id: Option<String>,
#[serde(rename = "policyDefinitionId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_id: Option<String>,
#[serde(rename = "effectiveParameters", default, skip_serializing_if = "Option::is_none")]
pub effective_parameters: Option<String>,
#[serde(rename = "isCompliant", default, skip_serializing_if = "Option::is_none")]
pub is_compliant: Option<bool>,
#[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
pub subscription_id: Option<String>,
#[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")]
pub resource_type: Option<String>,
#[serde(rename = "resourceLocation", default, skip_serializing_if = "Option::is_none")]
pub resource_location: Option<String>,
#[serde(rename = "resourceGroup", default, skip_serializing_if = "Option::is_none")]
pub resource_group: Option<String>,
#[serde(rename = "resourceTags", default, skip_serializing_if = "Option::is_none")]
pub resource_tags: Option<String>,
#[serde(rename = "policyAssignmentName", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_name: Option<String>,
#[serde(rename = "policyAssignmentOwner", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_owner: Option<String>,
#[serde(rename = "policyAssignmentParameters", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_parameters: Option<String>,
#[serde(rename = "policyAssignmentScope", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_scope: Option<String>,
#[serde(rename = "policyDefinitionName", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_name: Option<String>,
#[serde(rename = "policyDefinitionAction", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_action: Option<String>,
#[serde(rename = "policyDefinitionCategory", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_category: Option<String>,
#[serde(rename = "policySetDefinitionId", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_id: Option<String>,
#[serde(rename = "policySetDefinitionName", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_name: Option<String>,
#[serde(rename = "policySetDefinitionOwner", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_owner: Option<String>,
#[serde(rename = "policySetDefinitionCategory", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_category: Option<String>,
#[serde(rename = "policySetDefinitionParameters", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_parameters: Option<String>,
#[serde(rename = "managementGroupIds", default, skip_serializing_if = "Option::is_none")]
pub management_group_ids: Option<String>,
#[serde(rename = "policyDefinitionReferenceId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_reference_id: Option<String>,
#[serde(rename = "complianceState", default, skip_serializing_if = "Option::is_none")]
pub compliance_state: Option<String>,
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "principalOid", default, skip_serializing_if = "Option::is_none")]
pub principal_oid: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub components: Vec<ComponentEventDetails>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComponentEventDetails {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub timestamp: Option<String>,
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "principalOid", default, skip_serializing_if = "Option::is_none")]
pub principal_oid: Option<String>,
#[serde(rename = "policyDefinitionAction", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_action: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MetadataDocument {}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyStatesQueryResults {
#[serde(rename = "@odata.context", default, skip_serializing_if = "Option::is_none")]
pub odata_context: Option<String>,
#[serde(rename = "@odata.count", default, skip_serializing_if = "Option::is_none")]
pub odata_count: Option<i32>,
#[serde(rename = "@odata.nextLink", default, skip_serializing_if = "Option::is_none")]
pub odata_next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PolicyState>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyState {
#[serde(rename = "@odata.id", default, skip_serializing_if = "Option::is_none")]
pub odata_id: Option<String>,
#[serde(rename = "@odata.context", default, skip_serializing_if = "Option::is_none")]
pub odata_context: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub timestamp: Option<String>,
#[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
pub resource_id: Option<String>,
#[serde(rename = "policyAssignmentId", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_id: Option<String>,
#[serde(rename = "policyDefinitionId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_id: Option<String>,
#[serde(rename = "effectiveParameters", default, skip_serializing_if = "Option::is_none")]
pub effective_parameters: Option<String>,
#[serde(rename = "isCompliant", default, skip_serializing_if = "Option::is_none")]
pub is_compliant: Option<bool>,
#[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
pub subscription_id: Option<String>,
#[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")]
pub resource_type: Option<String>,
#[serde(rename = "resourceLocation", default, skip_serializing_if = "Option::is_none")]
pub resource_location: Option<String>,
#[serde(rename = "resourceGroup", default, skip_serializing_if = "Option::is_none")]
pub resource_group: Option<String>,
#[serde(rename = "resourceTags", default, skip_serializing_if = "Option::is_none")]
pub resource_tags: Option<String>,
#[serde(rename = "policyAssignmentName", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_name: Option<String>,
#[serde(rename = "policyAssignmentOwner", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_owner: Option<String>,
#[serde(rename = "policyAssignmentParameters", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_parameters: Option<String>,
#[serde(rename = "policyAssignmentScope", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_scope: Option<String>,
#[serde(rename = "policyDefinitionName", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_name: Option<String>,
#[serde(rename = "policyDefinitionAction", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_action: Option<String>,
#[serde(rename = "policyDefinitionCategory", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_category: Option<String>,
#[serde(rename = "policySetDefinitionId", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_id: Option<String>,
#[serde(rename = "policySetDefinitionName", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_name: Option<String>,
#[serde(rename = "policySetDefinitionOwner", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_owner: Option<String>,
#[serde(rename = "policySetDefinitionCategory", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_category: Option<String>,
#[serde(rename = "policySetDefinitionParameters", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_parameters: Option<String>,
#[serde(rename = "managementGroupIds", default, skip_serializing_if = "Option::is_none")]
pub management_group_ids: Option<String>,
#[serde(rename = "policyDefinitionReferenceId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_reference_id: Option<String>,
#[serde(rename = "complianceState", default, skip_serializing_if = "Option::is_none")]
pub compliance_state: Option<String>,
#[serde(rename = "policyEvaluationDetails", default, skip_serializing_if = "Option::is_none")]
pub policy_evaluation_details: Option<PolicyEvaluationDetails>,
#[serde(rename = "policyDefinitionGroupNames", default, skip_serializing_if = "Vec::is_empty")]
pub policy_definition_group_names: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub components: Vec<ComponentStateDetails>,
#[serde(rename = "policyDefinitionVersion", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_version: Option<String>,
#[serde(rename = "policySetDefinitionVersion", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_version: Option<String>,
#[serde(rename = "policyAssignmentVersion", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_version: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyEvaluationDetails {
#[serde(rename = "evaluatedExpressions", default, skip_serializing_if = "Vec::is_empty")]
pub evaluated_expressions: Vec<ExpressionEvaluationDetails>,
#[serde(rename = "ifNotExistsDetails", default, skip_serializing_if = "Option::is_none")]
pub if_not_exists_details: Option<IfNotExistsEvaluationDetails>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComponentStateDetails {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub timestamp: Option<String>,
#[serde(rename = "complianceState", default, skip_serializing_if = "Option::is_none")]
pub compliance_state: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExpressionEvaluationDetails {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub result: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub expression: Option<String>,
#[serde(rename = "expressionKind", default, skip_serializing_if = "Option::is_none")]
pub expression_kind: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub path: Option<String>,
#[serde(rename = "expressionValue", default, skip_serializing_if = "Option::is_none")]
pub expression_value: Option<serde_json::Value>,
#[serde(rename = "targetValue", default, skip_serializing_if = "Option::is_none")]
pub target_value: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operator: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IfNotExistsEvaluationDetails {
#[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
pub resource_id: Option<String>,
#[serde(rename = "totalResources", default, skip_serializing_if = "Option::is_none")]
pub total_resources: Option<i64>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SummarizeResults {
#[serde(rename = "@odata.context", default, skip_serializing_if = "Option::is_none")]
pub odata_context: Option<String>,
#[serde(rename = "@odata.count", default, skip_serializing_if = "Option::is_none")]
pub odata_count: Option<i32>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Summary>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Summary {
#[serde(rename = "@odata.id", default, skip_serializing_if = "Option::is_none")]
pub odata_id: Option<String>,
#[serde(rename = "@odata.context", default, skip_serializing_if = "Option::is_none")]
pub odata_context: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub results: Option<SummaryResults>,
#[serde(rename = "policyAssignments", default, skip_serializing_if = "Vec::is_empty")]
pub policy_assignments: Vec<PolicyAssignmentSummary>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SummaryResults {
#[serde(rename = "queryResultsUri", default, skip_serializing_if = "Option::is_none")]
pub query_results_uri: Option<String>,
#[serde(rename = "nonCompliantResources", default, skip_serializing_if = "Option::is_none")]
pub non_compliant_resources: Option<i32>,
#[serde(rename = "nonCompliantPolicies", default, skip_serializing_if = "Option::is_none")]
pub non_compliant_policies: Option<i32>,
#[serde(rename = "resourceDetails", default, skip_serializing_if = "Vec::is_empty")]
pub resource_details: Vec<ComplianceDetail>,
#[serde(rename = "policyDetails", default, skip_serializing_if = "Vec::is_empty")]
pub policy_details: Vec<ComplianceDetail>,
#[serde(rename = "policyGroupDetails", default, skip_serializing_if = "Vec::is_empty")]
pub policy_group_details: Vec<ComplianceDetail>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComplianceDetail {
#[serde(rename = "complianceState", default, skip_serializing_if = "Option::is_none")]
pub compliance_state: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<i32>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyAssignmentSummary {
#[serde(rename = "policyAssignmentId", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_id: Option<String>,
#[serde(rename = "policySetDefinitionId", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub results: Option<SummaryResults>,
#[serde(rename = "policyDefinitions", default, skip_serializing_if = "Vec::is_empty")]
pub policy_definitions: Vec<PolicyDefinitionSummary>,
#[serde(rename = "policyGroups", default, skip_serializing_if = "Vec::is_empty")]
pub policy_groups: Vec<PolicyGroupSummary>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyDefinitionSummary {
#[serde(rename = "policyDefinitionId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_id: Option<String>,
#[serde(rename = "policyDefinitionReferenceId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_reference_id: Option<String>,
#[serde(rename = "policyDefinitionGroupNames", default, skip_serializing_if = "Vec::is_empty")]
pub policy_definition_group_names: Vec<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub effect: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub results: Option<SummaryResults>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyGroupSummary {
#[serde(rename = "policyGroupName", default, skip_serializing_if = "Option::is_none")]
pub policy_group_name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub results: Option<SummaryResults>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationsListResults {
#[serde(rename = "@odata.count", default, skip_serializing_if = "Option::is_none")]
pub odata_count: Option<i32>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Operation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub display: Option<operation::Display>,
}
pub mod operation {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Display {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyMetadata {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PolicyMetadataProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyMetadataSlimProperties {
#[serde(rename = "metadataId", default, skip_serializing_if = "Option::is_none")]
pub metadata_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub category: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub title: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub owner: Option<String>,
#[serde(rename = "additionalContentUrl", default, skip_serializing_if = "Option::is_none")]
pub additional_content_url: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub metadata: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyMetadataProperties {
#[serde(flatten)]
pub policy_metadata_slim_properties: PolicyMetadataSlimProperties,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub requirements: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SlimPolicyMetadata {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PolicyMetadataSlimProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyMetadataCollection {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<SlimPolicyMetadata>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckRestrictionsRequest {
#[serde(rename = "resourceDetails")]
pub resource_details: CheckRestrictionsResourceDetails,
#[serde(rename = "pendingFields", default, skip_serializing_if = "Vec::is_empty")]
pub pending_fields: Vec<PendingField>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckRestrictionsResourceDetails {
#[serde(rename = "resourceContent")]
pub resource_content: serde_json::Value,
#[serde(rename = "apiVersion", default, skip_serializing_if = "Option::is_none")]
pub api_version: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub scope: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PendingField {
pub field: String,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub values: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckRestrictionsResult {
#[serde(rename = "fieldRestrictions", default, skip_serializing_if = "Vec::is_empty")]
pub field_restrictions: Vec<FieldRestrictions>,
#[serde(rename = "contentEvaluationResult", default, skip_serializing_if = "Option::is_none")]
pub content_evaluation_result: Option<check_restrictions_result::ContentEvaluationResult>,
}
pub mod check_restrictions_result {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContentEvaluationResult {
#[serde(rename = "policyEvaluations", default, skip_serializing_if = "Vec::is_empty")]
pub policy_evaluations: Vec<PolicyEvaluationResult>,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FieldRestrictions {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub field: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub restrictions: Vec<FieldRestriction>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FieldRestriction {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub result: Option<field_restriction::Result>,
#[serde(rename = "defaultValue", default, skip_serializing_if = "Option::is_none")]
pub default_value: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub values: Vec<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub policy: Option<PolicyReference>,
}
pub mod field_restriction {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Result {
Required,
Removed,
Deny,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyEvaluationResult {
#[serde(rename = "policyInfo", default, skip_serializing_if = "Option::is_none")]
pub policy_info: Option<PolicyReference>,
#[serde(rename = "evaluationResult", default, skip_serializing_if = "Option::is_none")]
pub evaluation_result: Option<String>,
#[serde(rename = "evaluationDetails", default, skip_serializing_if = "Option::is_none")]
pub evaluation_details: Option<PolicyEvaluationDetails>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyReference {
#[serde(rename = "policyDefinitionId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_id: Option<String>,
#[serde(rename = "policySetDefinitionId", default, skip_serializing_if = "Option::is_none")]
pub policy_set_definition_id: Option<String>,
#[serde(rename = "policyDefinitionReferenceId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_reference_id: Option<String>,
#[serde(rename = "policyAssignmentId", default, skip_serializing_if = "Option::is_none")]
pub policy_assignment_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AttestationListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Attestation>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Attestation {
#[serde(flatten)]
pub resource: Resource,
pub properties: AttestationProperties,
#[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
pub system_data: Option<SystemData>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AttestationProperties {
#[serde(rename = "policyAssignmentId")]
pub policy_assignment_id: String,
#[serde(rename = "policyDefinitionReferenceId", default, skip_serializing_if = "Option::is_none")]
pub policy_definition_reference_id: Option<String>,
#[serde(rename = "complianceState", default, skip_serializing_if = "Option::is_none")]
pub compliance_state: Option<attestation_properties::ComplianceState>,
#[serde(rename = "expiresOn", default, skip_serializing_if = "Option::is_none")]
pub expires_on: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub owner: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub comments: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub evidence: Vec<AttestationEvidence>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "lastComplianceStateChangeAt", default, skip_serializing_if = "Option::is_none")]
pub last_compliance_state_change_at: Option<String>,
}
pub mod attestation_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ComplianceState {
Compliant,
NonCompliant,
Unknown,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AttestationEvidence {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "sourceUri", default, skip_serializing_if = "Option::is_none")]
pub source_uri: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemData {
#[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
pub created_by: Option<String>,
#[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")]
pub created_by_type: Option<system_data::CreatedByType>,
#[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
pub created_at: Option<String>,
#[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")]
pub last_modified_by: Option<String>,
#[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")]
pub last_modified_by_type: Option<system_data::LastModifiedByType>,
#[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")]
pub last_modified_at: Option<String>,
}
pub mod system_data {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CreatedByType {
User,
Application,
ManagedIdentity,
Key,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LastModifiedByType {
User,
Application,
ManagedIdentity,
Key,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
}
| TrackedResourceModificationDetails |
commit_store_ffi.rs | /*
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use std::ffi::CStr;
use std::mem;
use std::os::raw::{c_char, c_void};
use std::slice;
use sawtooth::journal::commit_store::{
ByHeightDirection, CommitStore, CommitStoreByHeightIterator,
};
use sawtooth::{
protocol::block::BlockPair,
protos::{FromBytes, IntoBytes},
};
use transact::database::error::DatabaseError;
use transact::database::lmdb::LmdbDatabase;
use transact::protocol::{batch::Batch, transaction::Transaction};
#[repr(u32)]
#[derive(Debug)]
pub enum ErrorCode {
Success = 0,
// Input errors
NullPointerProvided = 0x01,
InvalidArgument = 0x02,
// output errors
DatabaseError = 0x10,
NotFound = 0x11,
StopIteration = 0x20,
}
macro_rules! check_null {
($($arg:expr) , *) => {
$(if $arg.is_null() { return ErrorCode::NullPointerProvided; })*
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_new(
database: *const c_void,
commit_store: *mut *const c_void,
) -> ErrorCode {
check_null!(database);
let db_ref = (database as *const LmdbDatabase).as_ref().unwrap();
let new_commit_store = CommitStore::new(db_ref.clone());
*commit_store = Box::into_raw(Box::new(new_commit_store)) as *const c_void;
ErrorCode::Success
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_drop(commit_store: *mut c_void) -> ErrorCode {
check_null!(commit_store);
Box::from_raw(commit_store as *mut CommitStore);
ErrorCode::Success
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_by_block_id(
commit_store: *mut c_void,
block_id: *const c_char,
block_ptr: *mut *const u8,
block_len: *mut usize,
block_cap: *mut usize,
) -> ErrorCode {
check_null!(commit_store, block_id);
match deref_cstr(block_id) {
Ok(block_id) => match (*(commit_store as *mut CommitStore)).get_by_block_id(block_id) {
Ok(block) => return_block(block, block_ptr, block_len, block_cap),
Err(err) => map_database_error(err),
},
Err(err) => err,
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_chain_head(
commit_store: *mut c_void,
block_ptr: *mut *const u8,
block_len: *mut usize,
block_cap: *mut usize,
) -> ErrorCode {
check_null!(commit_store);
match (*(commit_store as *mut CommitStore)).get_chain_head() {
Ok(block) => return_block(block, block_ptr, block_len, block_cap),
Err(err) => map_database_error(err),
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_by_batch_id(
commit_store: *mut c_void,
batch_id: *const c_char,
block_ptr: *mut *const u8,
block_len: *mut usize,
block_cap: *mut usize,
) -> ErrorCode {
check_null!(commit_store, batch_id);
match deref_cstr(batch_id) {
Ok(batch_id) => match (*(commit_store as *mut CommitStore)).get_by_batch_id(batch_id) {
Ok(block) => return_block(block, block_ptr, block_len, block_cap),
Err(err) => map_database_error(err),
},
Err(err) => err,
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_by_transaction_id(
commit_store: *mut c_void,
transaction_id: *const c_char,
block_ptr: *mut *const u8,
block_len: *mut usize,
block_cap: *mut usize,
) -> ErrorCode {
check_null!(commit_store, transaction_id);
match deref_cstr(transaction_id) {
Ok(transaction_id) => {
match (*(commit_store as *mut CommitStore)).get_by_transaction_id(transaction_id) {
Ok(block) => return_block(block, block_ptr, block_len, block_cap),
Err(err) => map_database_error(err),
}
}
Err(err) => err,
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_by_block_num(
commit_store: *mut c_void,
block_num: u64,
block_ptr: *mut *const u8,
block_len: *mut usize,
block_cap: *mut usize,
) -> ErrorCode |
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_batch(
commit_store: *mut c_void,
batch_id: *const c_char,
batch_ptr: *mut *const u8,
batch_len: *mut usize,
batch_cap: *mut usize,
) -> ErrorCode {
check_null!(commit_store, batch_id);
match deref_cstr(batch_id) {
Ok(batch_id) => match (*(commit_store as *mut CommitStore)).get_batch(batch_id) {
Ok(batch) => return_batch(batch, batch_ptr, batch_len, batch_cap),
Err(err) => map_database_error(err),
},
Err(err) => err,
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_transaction(
commit_store: *mut c_void,
transaction_id: *const c_char,
transaction_ptr: *mut *const u8,
transaction_len: *mut usize,
transaction_cap: *mut usize,
) -> ErrorCode {
check_null!(commit_store, transaction_id);
match deref_cstr(transaction_id) {
Ok(transaction_id) => {
match (*(commit_store as *mut CommitStore)).get_transaction(transaction_id) {
Ok(transaction) => return_transaction(
transaction,
transaction_ptr,
transaction_len,
transaction_cap,
),
Err(err) => map_database_error(err),
}
}
Err(err) => err,
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_batch_by_transaction(
commit_store: *mut c_void,
transaction_id: *const c_char,
batch_ptr: *mut *const u8,
batch_len: *mut usize,
batch_cap: *mut usize,
) -> ErrorCode {
check_null!(commit_store, transaction_id);
match deref_cstr(transaction_id) {
Ok(transaction_id) => {
match (*(commit_store as *mut CommitStore)).get_batch_by_transaction(transaction_id) {
Ok(batch) => return_batch(batch, batch_ptr, batch_len, batch_cap),
Err(err) => map_database_error(err),
}
}
Err(err) => err,
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_contains_block(
commit_store: *mut c_void,
block_id: *const c_char,
contains_ptr: *mut bool,
) -> ErrorCode {
check_null!(commit_store, block_id);
match deref_cstr(block_id) {
Ok(block_id) => match (*(commit_store as *mut CommitStore)).contains_block(block_id) {
Ok(contains) => {
*contains_ptr = contains;
ErrorCode::Success
}
Err(err) => map_database_error(err),
},
Err(err) => err,
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_contains_batch(
commit_store: *mut c_void,
batch_id: *const c_char,
contains_ptr: *mut bool,
) -> ErrorCode {
check_null!(commit_store, batch_id);
match deref_cstr(batch_id) {
Ok(batch_id) => match (*(commit_store as *mut CommitStore)).contains_batch(batch_id) {
Ok(contains) => {
*contains_ptr = contains;
ErrorCode::Success
}
Err(err) => map_database_error(err),
},
Err(err) => err,
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_contains_transaction(
commit_store: *mut c_void,
transaction_id: *const c_char,
contains_ptr: *mut bool,
) -> ErrorCode {
check_null!(commit_store, transaction_id);
match deref_cstr(transaction_id) {
Ok(transaction_id) => {
match (*(commit_store as *mut CommitStore)).contains_transaction(transaction_id) {
Ok(contains) => {
*contains_ptr = contains;
ErrorCode::Success
}
Err(err) => map_database_error(err),
}
}
Err(err) => err,
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_block_count(
commit_store: *mut c_void,
count_ptr: *mut usize,
) -> ErrorCode {
check_null!(commit_store);
match (*(commit_store as *mut CommitStore)).get_block_count() {
Ok(count) => {
*count_ptr = count;
ErrorCode::Success
}
Err(err) => map_database_error(err),
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_batch_count(
commit_store: *mut c_void,
count_ptr: *mut usize,
) -> ErrorCode {
check_null!(commit_store);
match (*(commit_store as *mut CommitStore)).get_batch_count() {
Ok(count) => {
*count_ptr = count;
ErrorCode::Success
}
Err(err) => map_database_error(err),
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_transaction_count(
commit_store: *mut c_void,
count_ptr: *mut usize,
) -> ErrorCode {
check_null!(commit_store);
match (*(commit_store as *mut CommitStore)).get_transaction_count() {
Ok(count) => {
*count_ptr = count;
ErrorCode::Success
}
Err(err) => map_database_error(err),
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_get_block_iter(
commit_store: *mut c_void,
start_block_num: *const u64,
decreasing: bool,
block_iter_ptr: *mut *const c_void,
) -> ErrorCode {
check_null!(commit_store);
let start = if start_block_num.is_null() {
None
} else {
Some(*start_block_num)
};
let direction = if decreasing {
ByHeightDirection::Decreasing
} else {
ByHeightDirection::Increasing
};
let block_iter =
(*(commit_store as *mut CommitStore)).get_block_by_height_iter(start, direction);
*block_iter_ptr = Box::into_raw(Box::new(block_iter)) as *const c_void;
ErrorCode::Success
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_block_by_height_iter_next(
block_iter_ptr: *mut c_void,
block_ptr: *mut *const u8,
block_len: *mut usize,
block_cap: *mut usize,
) -> ErrorCode {
check_null!(block_iter_ptr);
if let Some(block) = (*(block_iter_ptr as *mut CommitStoreByHeightIterator)).next() {
return_block(block, block_ptr, block_len, block_cap)
} else {
ErrorCode::StopIteration
}
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_block_by_height_iter_drop(
block_iter_ptr: *mut c_void,
) -> ErrorCode {
check_null!(block_iter_ptr);
Box::from_raw(block_iter_ptr as *mut CommitStoreByHeightIterator);
ErrorCode::Success
}
#[repr(C)]
#[derive(Debug)]
pub struct PutEntry {
block_bytes: *mut u8,
block_bytes_len: usize,
}
#[no_mangle]
pub unsafe extern "C" fn commit_store_put_blocks(
commit_store: *mut c_void,
blocks: *const *const c_void,
blocks_len: usize,
) -> ErrorCode {
check_null!(commit_store, blocks);
let blocks_result = slice::from_raw_parts(blocks, blocks_len)
.iter()
.map(|ptr| {
let entry = *ptr as *const PutEntry;
let payload = slice::from_raw_parts((*entry).block_bytes, (*entry).block_bytes_len);
BlockPair::from_bytes(&payload)
})
.collect::<Result<_, _>>();
match blocks_result {
Ok(blocks) => match (*(commit_store as *mut CommitStore)).put_blocks(blocks) {
Ok(_) => ErrorCode::Success,
Err(err) => {
error!("Error putting blocks: {:?}", err);
ErrorCode::DatabaseError
}
},
Err(err) => {
error!("Error deserializing blocks from FFI: {:?}", err);
ErrorCode::InvalidArgument
}
}
}
// FFI Helpers
unsafe fn return_block(
block_pair: BlockPair,
block_ptr: *mut *const u8,
block_len: *mut usize,
block_cap: *mut usize,
) -> ErrorCode {
match block_pair.into_bytes() {
Ok(payload) => {
*block_cap = payload.capacity();
*block_len = payload.len();
*block_ptr = payload.as_slice().as_ptr();
mem::forget(payload);
ErrorCode::Success
}
Err(err) => {
warn!("Failed to serialize block to bytes: {}", err);
ErrorCode::DatabaseError
}
}
}
unsafe fn return_batch(
batch: Batch,
batch_ptr: *mut *const u8,
batch_len: *mut usize,
batch_cap: *mut usize,
) -> ErrorCode {
match batch.into_bytes() {
Ok(payload) => {
*batch_cap = payload.capacity();
*batch_len = payload.len();
*batch_ptr = payload.as_slice().as_ptr();
mem::forget(payload);
ErrorCode::Success
}
Err(err) => {
warn!("Failed to serialize batch to bytes: {}", err);
ErrorCode::DatabaseError
}
}
}
unsafe fn return_transaction(
transaction: Transaction,
transaction_ptr: *mut *const u8,
transaction_len: *mut usize,
transaction_cap: *mut usize,
) -> ErrorCode {
match transaction.into_bytes() {
Ok(payload) => {
*transaction_cap = payload.capacity();
*transaction_len = payload.len();
*transaction_ptr = payload.as_slice().as_ptr();
mem::forget(payload);
ErrorCode::Success
}
Err(err) => {
warn!("Failed to serialize transaction to bytes: {}", err);
ErrorCode::DatabaseError
}
}
}
fn map_database_error(err: DatabaseError) -> ErrorCode {
match err {
DatabaseError::NotFoundError(_) => ErrorCode::NotFound,
err => {
error!("Database error: {:?}", err);
ErrorCode::DatabaseError
}
}
}
unsafe fn deref_cstr<'a>(cstr: *const c_char) -> Result<&'a str, ErrorCode> {
CStr::from_ptr(cstr)
.to_str()
.map_err(|_| ErrorCode::InvalidArgument)
}
| {
check_null!(commit_store);
match (*(commit_store as *mut CommitStore)).get_by_block_num(block_num) {
Ok(block) => return_block(block, block_ptr, block_len, block_cap),
Err(err) => map_database_error(err),
}
} |
index.js | import { setPublicPath } from "systemjs-webpack-interop";
setPublicPath("@microapp/{MICROAPP_NPM_PACKAGE_NAME}"); |
import Module from "./module";
export default Module; |
|
io_utils.py | from tweet_handler import Tweet, POI
import paras
from random import *
from time import time, ctime
from collections import defaultdict
import numpy as np
from sys import platform
import multiprocessing
from multiprocessing import *
from functools import partial
import cPickle as pickle
class IO:
|
if __name__ == '__main__':
# IO().merge_phrases_into_tweets_fields_adhoc()
IO().read_tweets()
| def __init__(self):
self.root_dir = '/Users/keyangzhang/Documents/UIUC/Research/Embedding/embedding/data/'+paras.pd['dataset']+'/'\
if platform == 'darwin' else '/shared/data/kzhang53/embedding/'+paras.pd['dataset']+'/'
self.input_dir = self.root_dir+'input/'
self.output_dir = self.root_dir+'output/'
self.models_dir = self.root_dir+'models/'
self.tweet_file = self.input_dir+'tweets'+str(paras.pd['data_size'])+'.txt'
self.poi_file = self.input_dir+'pois.txt'
self.case_study_dir = self.output_dir+'case_study/'
def get_voca(self, tweets, voca_min=0, voca_max=20000):
word2freq = defaultdict(int)
for tweet in tweets:
for word in tweet.words:
word2freq[word] += 1
word_and_freq = word2freq.items()
word_and_freq.sort(reverse=True, key=lambda tup:tup[1])
# print 'get_voca', len(tweets), len(word2freq)
voca = set(zip(*word_and_freq[voca_min:voca_max])[0])
if '' in voca:
voca.remove('')
return voca
def read_pois(self):
pois = []
for line in open(self.poi_file):
fields = line.strip().split(',')
if len(fields)<5:
continue
poi_id, lat, lng, cat, name = fields[0], float(fields[1]), float(fields[2]), fields[3], ','.join(fields[4:]).lower()
pois.append(POI(poi_id, lat, lng, cat, name))
return pois
def read_tweets(self, file_path=None):
tweets = []
if paras.pd['dataset']=='4sq':
for line in open('/shared/data/czhang82/clean/ny_checkins/checkins.txt'):
tweet = Tweet()
tweet.load_checkin(line.strip())
tweets.append(tweet)
if len(tweets)==paras.pd['data_size']:
break
elif paras.pd['dataset']=='old_la':
tweets = pickle.load(open(self.models_dir+'act_tweets_'+str(paras.pd['data_size'])+'.model','r'))
for tweet in tweets:
tweet.category = ''
elif paras.pd['dataset']=='old_ny':
for line in open("/shared/data/czhang82/clean/ny_tweets/tweets.txt"):
tweet = Tweet()
tweet.load_old_ny(line.strip())
tweets.append(tweet)
elif paras.pd['dataset'] in ['ny','la']:
for line in open(file_path if file_path else self.tweet_file):
tweet = Tweet()
tweet.load_tweet(line.strip())
tweets.append(tweet)
else:
print 'no such dataset!'
exit(0)
return tweets |
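# Illustrative usage sketch (added for clarity; not part of the original io_utils sources).
# It assumes paras.pd is configured for one of the supported datasets and that the input
# files referenced in IO.__init__ actually exist; the variable names are invented.
io = IO()
example_tweets = io.read_tweets()  # list of Tweet objects
example_pois = io.read_pois()  # list of POI objects
example_voca = io.get_voca(example_tweets)  # set with the most frequent words
print len(example_tweets), len(example_pois), len(example_voca)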
listWrangler_20191216.py | '''
Author: Luke Hebert
Date begun: December 16th, 2019
Description: finds either the intersection, union, or unique items from a set of n lists
especially useful for comparing lists of genes
inputs for the unique option need to be .txt files; this could easily be tweaked though
all input and output are forced to upper case; can be easily tweaked
'''
import os, sys
def getContents(paths_list):
|
slash = '\\' if os.name == 'nt' else '/'
arguments_list = sys.argv[:]
#INTERSECTION OF N LISTS
if "-i" in arguments_list:
#remove python program and -i arguments to make a pathway list
inPaths_list = list(arguments_list)
temp_pathsList = list(inPaths_list)
for path in temp_pathsList:
if (path == '-i') or ('.py' in path):
inPaths_list.remove(path)
#print out the pathway indexes so that user can select one as the output pathway directory
print('\n')
for i, path in enumerate(inPaths_list):
print(str(i) + '\t' + path)
#ask user to select output file name and directory
outFileName = raw_input("\nPlease enter the name (not the path) of the output txt file (include the file suffix):")
outPath_index = int(raw_input("\nPlease enter index of the file whose path will be used for the output file (an integer):"))
if len(inPaths_list) < 2: #user must specify at least two input files for this option
print('\nUser must specify at least two lists in order to find the intersection.')
else:
print("\nYou chose to find the intersection of " + str(len(inPaths_list)) + " lists.")
contents_dict = getContents(inPaths_list) #read the input files into a dictionary
intersection_list = [] #will fill this with intersection data only
for key, val in contents_dict.iteritems(): #for each input file's list data
if len(intersection_list) == 0: #if this is the first file's data evaluated, just copy it to output list
intersection_list = list(val)
else: #the heart of the algorithm
temp_list = [item for item in val if item in intersection_list] #this should create the intersection of val and intersection_list
intersection_list = list(temp_list) #update intersection_list using a deep copy
completeOutPath = slash.join(inPaths_list[outPath_index].split(slash)[:-1] + [outFileName]) #not the most readable, but this is the output path/name
#write intersection_list to the output file as a single column of data
with open(completeOutPath, 'w') as outFile:
for item in intersection_list:
outFile.write(item + '\n')
#UNION OF N LISTS
elif "-n" in arguments_list:
#remove python program and -n arguments to make a pathway list
inPaths_list = list(arguments_list)
temp_pathsList = list(inPaths_list)
for path in temp_pathsList:
if (path == '-n') or ('.py' in path):
inPaths_list.remove(path)
#print out the pathway indexes so that user can select one as the output pathway directory
print('\n')
for i, path in enumerate(inPaths_list):
print(str(i) + '\t' + path)
#ask user to select output file name and directory
outFileName = raw_input("\nPlease enter the name (not the path) of the output txt file (include the file suffix):")
outPath_index = int(raw_input("\nPlease enter index of the file whose path will be used for the output file (an integer):"))
if len(inPaths_list) < 2: #user must specify at least two input files for this option
print('\nUser must specify at least two lists in order to find the union.')
else:
print("\nYou chose to find the union of " + str(len(inPaths_list)) + " lists.")
contents_dict = getContents(inPaths_list) #read the input files into a dictionary
union_list = [] #will fill this with union data only
for key, val in contents_dict.iteritems(): #for each input file's list data
if len(union_list) == 0: #if this is the first file's data evaluated, just copy it to output list
union_list = list(val)
else: #the heart of the algorithm
temp_list = union_list + val #update union list with current file's data/list
union_list = list(set(temp_list)) #remove any duplicates
completeOutPath = slash.join(inPaths_list[outPath_index].split(slash)[:-1] + [outFileName]) #not the most readable, but this is the output path/name
#write union_list to the output file as a single column of data
with open(completeOutPath, 'w') as outFile:
for item in union_list:
outFile.write(item + '\n')
#ITEMS UNIQUE TO EACH OF N LISTS
elif "-o" in arguments_list:
inPaths_list = list(arguments_list)
#remove python program file and selection arguments from arguments list
temp_pathsList = list(inPaths_list)
for path in temp_pathsList:
if (path == '-o') or ('.py' in path):
inPaths_list.remove(path)
if len(inPaths_list) < 2: #user must specify at least two input files for this option
print('\nUser must specify at least two lists in order to find the uniques.')
else:
print("\nYou chose to find the unnique values from " + str(len(inPaths_list)) + " lists.")
contents_dict = getContents(inPaths_list) #read the input files into a dictionary
union_list = [] #not actually used in this branch; each file's unique_list is built below
for key, val in contents_dict.iteritems(): #for each input file's list data
unique_list = list(val)
temp_dict = contents_dict.copy()
del temp_dict[key] #we want to check current list against all other lists, but not itself
for key2, val2 in temp_dict.iteritems(): #go through all the lists except the current list of interest
unique_list = [item for item in unique_list if item not in val2] #keep only those that are unique to unique_list
outFilePath = key.replace(".txt", "_uniques.txt")
with open(outFilePath, 'w') as outFile:
for item in unique_list:
outFile.write(item + '\n')
#SET OF ONE LIST
elif "-s" in arguments_list:
print('\nYou have chosen to take the set of a single list.')
inPath = ''
for argument in arguments_list:
if ('.py' not in argument) and ('-s' not in argument):
inPath = str(argument) #copy the path string
outList = []
with open(inPath, 'r') as inFile:
for line in inFile:
outList.append(line.strip('\n'))
outSet = set(outList)
outPath = inPath.replace(".txt", "_set.txt")
with open(outPath, 'w') as outFile:
for item in outSet:
outFile.write(item.upper() + '\n') | '''reads multiple files and assigns them to a dictionary with filepaths as keys and content lists as values'''
contents_dict = {}
for file in paths_list:
contents_dict[file] = []
with open(file, 'r') as inFile:
for line in inFile:
line = line.strip('\n').strip('\r')
contents_dict[file].append(line.upper())
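#e.g. contents_dict might end up as {'genesA.txt': ['TP53', 'BRCA1'], 'genesB.txt': ['TP53', 'EGFR']} (hypothetical example)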
return contents_dict |
ThemeSpaceListGeneric.py | class ThemeSpaceListGeneric:
| list = None
list_text = None
list_text_hi = None
list_title = None |
|
transformer.py | # ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import copy
from typing import Optional, List
import math
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from util.misc import inverse_sigmoid
from models.row_column_decoupled_attention import MultiheadRCDA
class Transformer(nn.Module):
def __init__(self, d_model=256, nhead=8,
num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.,
activation="relu", num_feature_levels=3,num_query_position = 300,num_query_pattern=3,
spatial_prior="learned",attention_type="RCDA"):
super().__init__()
self.d_model = d_model
self.nhead = nhead
self.attention_type = attention_type
encoder_layer = TransformerEncoderLayerSpatial(d_model, dim_feedforward,
dropout, activation, nhead , attention_type)
encoder_layer_level = TransformerEncoderLayerLevel(d_model, dim_feedforward,
dropout, activation, nhead)
decoder_layer = TransformerDecoderLayer(d_model, dim_feedforward,
dropout, activation, nhead,
num_feature_levels, attention_type)
if num_feature_levels == 1:
self.num_encoder_layers_level = 0
else:
self.num_encoder_layers_level = num_encoder_layers // 2
self.num_encoder_layers_spatial = num_encoder_layers - self.num_encoder_layers_level
self.encoder_layers = _get_clones(encoder_layer, self.num_encoder_layers_spatial)
self.encoder_layers_level = _get_clones(encoder_layer_level, self.num_encoder_layers_level)
self.decoder_layers = _get_clones(decoder_layer, num_decoder_layers)
self.spatial_prior=spatial_prior
if num_feature_levels>1:
self.level_embed = nn.Embedding(num_feature_levels, d_model)
self.num_pattern = num_query_pattern
self.pattern = nn.Embedding(self.num_pattern, d_model)
self.num_position = num_query_position
if self.spatial_prior == "learned":
self.position = nn.Embedding(self.num_position, 2)
self.adapt_pos2d = nn.Sequential(
nn.Linear(d_model, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model),
)
self.adapt_pos1d = nn.Sequential(
nn.Linear(d_model, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model),
)
self.num_layers = num_decoder_layers
num_classes = 91
self.class_embed = nn.Linear(d_model, num_classes)
self.bbox_embed = MLP(d_model, d_model, 4, 3)
self._reset_parameters()
def _reset_parameters(self):
num_pred = self.num_layers
num_classes = 91
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
self.class_embed.bias.data = torch.ones(num_classes) * bias_value
nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
if self.spatial_prior == "learned":
nn.init.uniform_(self.position.weight.data, 0, 1)
nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
def forward(self, srcs, masks):
# prepare input for decoder
bs, l, c, h, w = srcs.shape
if self.spatial_prior == "learned":
reference_points = self.position.weight.unsqueeze(0).repeat(bs, self.num_pattern, 1)
elif self.spatial_prior == "grid":
nx=ny=round(math.sqrt(self.num_position))
self.num_position=nx*ny | reference_points = reference_points.unsqueeze(0).repeat(bs, self.num_pattern, 1)
else:
raise ValueError(f'unknown {self.spatial_prior} spatial prior')
tgt = self.pattern.weight.reshape(1, self.num_pattern, 1, c).repeat(bs, 1, self.num_position, 1).reshape(
bs, self.num_pattern * self.num_position, c)
mask = masks[-1].unsqueeze(1).repeat(1,l,1,1).reshape(bs*l,h,w)
pos_col, pos_row = mask2pos(mask)
if self.attention_type=="RCDA":
posemb_row = self.adapt_pos1d(pos2posemb1d(pos_row))
posemb_col = self.adapt_pos1d(pos2posemb1d(pos_col))
posemb_2d = None
else:
pos_2d = torch.cat([pos_row.unsqueeze(1).repeat(1, h, 1).unsqueeze(-1), pos_col.unsqueeze(2).repeat(1, 1, w).unsqueeze(-1)],dim=-1)
posemb_2d = self.adapt_pos2d(pos2posemb2d(pos_2d))
posemb_row = posemb_col = None
outputs = srcs.reshape(bs * l, c, h, w)
for idx in range(len(self.encoder_layers)):
outputs = self.encoder_layers[idx](outputs, mask, posemb_row, posemb_col,posemb_2d)
if idx < self.num_encoder_layers_level:
outputs = self.encoder_layers_level[idx](outputs, level_emb=self.level_embed.weight.unsqueeze(1).unsqueeze(0).repeat(bs,1,1,1).reshape(bs*l,1,c))
srcs = outputs.reshape(bs, l, c, h, w)
output = tgt
outputs_classes = []
outputs_coords = []
for lid, layer in enumerate(self.decoder_layers):
output = layer(output, reference_points, srcs, mask, adapt_pos2d=self.adapt_pos2d,
adapt_pos1d=self.adapt_pos1d, posemb_row=posemb_row, posemb_col=posemb_col,posemb_2d=posemb_2d)
reference = inverse_sigmoid(reference_points)
outputs_class = self.class_embed[lid](output)
tmp = self.bbox_embed[lid](output)
if reference.shape[-1] == 4:
tmp += reference
else:
assert reference.shape[-1] == 2
tmp[..., :2] += reference
outputs_coord = tmp.sigmoid()
outputs_classes.append(outputs_class[None,])
outputs_coords.append(outputs_coord[None,])
output = torch.cat(outputs_classes, dim=0), torch.cat(outputs_coords, dim=0)
return output
class TransformerEncoderLayerSpatial(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0., activation="relu",
n_heads=8, attention_type="RCDA"):
super().__init__()
self.attention_type = attention_type
if attention_type=="RCDA":
attention_module=MultiheadRCDA
elif attention_type == "nn.MultiheadAttention":
attention_module=nn.MultiheadAttention
else:
raise ValueError(f'unknown {attention_type} attention_type')
# self attention
self.self_attn = attention_module(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.ffn = FFN(d_model, d_ffn, dropout, activation)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward(self, src, padding_mask=None, posemb_row=None, posemb_col=None,posemb_2d=None):
# self attention
bz, c, h, w = src.shape
src = src.permute(0, 2, 3, 1)
if self.attention_type=="RCDA":
posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)
posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)
src2 = self.self_attn((src + posemb_row).reshape(bz, h * w, c), (src + posemb_col).reshape(bz, h * w, c),
src + posemb_row, src + posemb_col,
src, key_padding_mask=padding_mask)[0].transpose(0, 1).reshape(bz, h, w, c)
else:
src2 = self.self_attn((src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),
(src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),
src.reshape(bz, h * w, c).transpose(0, 1))[0].transpose(0, 1).reshape(bz, h, w, c)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.ffn(src)
src = src.permute(0, 3, 1, 2)
return src
class TransformerEncoderLayerLevel(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0., activation="relu",
n_heads=8):
super().__init__()
# self attention
self.self_attn_level = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.ffn = FFN(d_model, d_ffn, dropout, activation)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward(self, src, level_emb=0):
# self attention
bz, c, h, w = src.shape
src = src.permute(0, 2, 3, 1)
src2 = self.self_attn_level(src.reshape(bz, h * w, c) + level_emb, src.reshape(bz, h * w, c) + level_emb,
src.reshape(bz, h * w, c))[0].reshape(bz, h, w, c)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.ffn(src)
src = src.permute(0, 3, 1, 2)
return src
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model=256, d_ffn=1024,
dropout=0., activation="relu", n_heads=8,
n_levels=3, attention_type="RCDA"):
super().__init__()
self.attention_type = attention_type
if attention_type=="RCDA":
attention_module=MultiheadRCDA
elif attention_type == "nn.MultiheadAttention":
attention_module=nn.MultiheadAttention
else:
raise ValueError(f'unknown {attention_type} attention_type')
# cross attention
self.cross_attn = attention_module(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# level combination
if n_levels>1:
self.level_fc = nn.Linear(d_model * n_levels, d_model)
# ffn
self.ffn = FFN(d_model, d_ffn, dropout, activation)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward(self, tgt, reference_points, srcs, src_padding_masks=None, adapt_pos2d=None,
adapt_pos1d=None, posemb_row=None, posemb_col=None, posemb_2d=None):
tgt_len = tgt.shape[1]
query_pos = pos2posemb2d(reference_points.squeeze(2))
query_pos = adapt_pos2d(query_pos)
# self attention
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
bz, l, c, h, w = srcs.shape
srcs = srcs.reshape(bz * l, c, h, w).permute(0, 2, 3, 1)
if self.attention_type == "RCDA":
query_pos_x = adapt_pos1d(pos2posemb1d(reference_points[..., 0]))
query_pos_y = adapt_pos1d(pos2posemb1d(reference_points[..., 1]))
posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)
posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)
src_row = src_col = srcs
k_row = src_row + posemb_row
k_col = src_col + posemb_col
tgt2 = self.cross_attn((tgt + query_pos_x).repeat(l, 1, 1), (tgt + query_pos_y).repeat(l, 1, 1), k_row, k_col,
srcs, key_padding_mask=src_padding_masks)[0].transpose(0, 1)
else:
tgt2 = self.cross_attn((tgt + query_pos).repeat(l, 1, 1).transpose(0, 1),
(srcs + posemb_2d).reshape(bz * l, h * w, c).transpose(0,1),
srcs.reshape(bz * l, h * w, c).transpose(0, 1))[0].transpose(0,1)
if l > 1:
tgt2 = self.level_fc(tgt2.reshape(bz, l, tgt_len, c).permute(0, 2, 3, 1).reshape(bz, tgt_len, c * l))
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# ffn
tgt = self.ffn(tgt)
return tgt
class FFN(nn.Module):
def __init__(self, d_model=256, d_ffn=1024, dropout=0., activation='relu'):
super().__init__()
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
def forward(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
nhead=args.nheads,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
dim_feedforward=args.dim_feedforward,
dropout=args.dropout,
activation="relu",
num_feature_levels=args.num_feature_levels,
num_query_position=args.num_query_position,
num_query_pattern=args.num_query_pattern,
spatial_prior=args.spatial_prior,
attention_type=args.attention_type,
)
def pos2posemb2d(pos, num_pos_feats=128, temperature=10000):
scale = 2 * math.pi
pos = pos * scale
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
pos_x = pos[..., 0, None] / dim_t
pos_y = pos[..., 1, None] / dim_t
pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2)
posemb = torch.cat((pos_y, pos_x), dim=-1)
return posemb
def pos2posemb1d(pos, num_pos_feats=256, temperature=10000):
scale = 2 * math.pi
pos = pos * scale
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
pos_x = pos[..., None] / dim_t
posemb = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
return posemb
def mask2pos(mask):
not_mask = ~mask
y_embed = not_mask[:, :, 0].cumsum(1, dtype=torch.float32)
x_embed = not_mask[:, 0, :].cumsum(1, dtype=torch.float32)
y_embed = (y_embed - 0.5) / y_embed[:, -1:]
x_embed = (x_embed - 0.5) / x_embed[:, -1:]
return y_embed, x_embed | x = (torch.arange(nx) + 0.5) / nx
y = (torch.arange(ny) + 0.5) / ny
xy=torch.meshgrid(x,y)
reference_points=torch.cat([xy[0].reshape(-1)[...,None],xy[1].reshape(-1)[...,None]],-1).cuda() |
operations.rs | #![doc = "generated by AutoRust"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::models;
#[derive(Clone)]
pub struct Client {
endpoint: String,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
pipeline: azure_core::Pipeline,
}
#[derive(Clone)]
pub struct ClientBuilder {
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
endpoint: Option<String>,
scopes: Option<Vec<String>>,
}
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
impl Client {
pub(crate) fn endpoint(&self) -> &str {
self.endpoint.as_str()
}
pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential {
self.credential.as_ref()
}
pub(crate) fn scopes(&self) -> Vec<&str> {
self.scopes.iter().map(String::as_str).collect()
}
pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> azure_core::error::Result<azure_core::Response> {
let mut context = azure_core::Context::default();
let mut request = request.into();
self.pipeline.send(&mut context, &mut request).await
}
pub fn new(
endpoint: impl Into<String>,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
) -> Self {
let endpoint = endpoint.into();
let pipeline = azure_core::Pipeline::new(
option_env!("CARGO_PKG_NAME"),
option_env!("CARGO_PKG_VERSION"),
azure_core::ClientOptions::default(),
Vec::new(),
Vec::new(),
);
Self {
endpoint,
credential,
scopes,
pipeline,
}
}
pub fn backup_engines(&self) -> backup_engines::Client {
backup_engines::Client(self.clone())
}
pub fn backup_jobs(&self) -> backup_jobs::Client {
backup_jobs::Client(self.clone())
}
pub fn backup_operation_results(&self) -> backup_operation_results::Client {
backup_operation_results::Client(self.clone())
}
pub fn backup_operation_statuses(&self) -> backup_operation_statuses::Client {
backup_operation_statuses::Client(self.clone())
}
pub fn backup_policies(&self) -> backup_policies::Client {
backup_policies::Client(self.clone())
}
pub fn backup_protectable_items(&self) -> backup_protectable_items::Client {
backup_protectable_items::Client(self.clone())
}
pub fn backup_protected_items(&self) -> backup_protected_items::Client {
backup_protected_items::Client(self.clone())
}
pub fn backup_protection_containers(&self) -> backup_protection_containers::Client {
backup_protection_containers::Client(self.clone())
}
pub fn backup_protection_intent(&self) -> backup_protection_intent::Client {
backup_protection_intent::Client(self.clone())
}
pub fn backup_resource_encryption_configs(&self) -> backup_resource_encryption_configs::Client {
backup_resource_encryption_configs::Client(self.clone())
}
pub fn backup_resource_storage_configs_non_crr(&self) -> backup_resource_storage_configs_non_crr::Client {
backup_resource_storage_configs_non_crr::Client(self.clone())
}
pub fn backup_resource_vault_configs(&self) -> backup_resource_vault_configs::Client {
backup_resource_vault_configs::Client(self.clone())
}
pub fn backup_status(&self) -> backup_status::Client {
backup_status::Client(self.clone())
}
pub fn backup_usage_summaries(&self) -> backup_usage_summaries::Client {
backup_usage_summaries::Client(self.clone())
}
pub fn backup_workload_items(&self) -> backup_workload_items::Client {
backup_workload_items::Client(self.clone())
}
pub fn backups(&self) -> backups::Client {
backups::Client(self.clone())
}
pub fn bms_prepare_data_move_operation_result(&self) -> bms_prepare_data_move_operation_result::Client {
bms_prepare_data_move_operation_result::Client(self.clone())
}
pub fn export_jobs_operation_results(&self) -> export_jobs_operation_results::Client {
export_jobs_operation_results::Client(self.clone())
}
pub fn feature_support(&self) -> feature_support::Client {
feature_support::Client(self.clone())
}
pub fn item_level_recovery_connections(&self) -> item_level_recovery_connections::Client {
item_level_recovery_connections::Client(self.clone())
}
pub fn job_cancellations(&self) -> job_cancellations::Client {
job_cancellations::Client(self.clone())
}
pub fn job_details(&self) -> job_details::Client {
job_details::Client(self.clone())
}
pub fn job_operation_results(&self) -> job_operation_results::Client {
job_operation_results::Client(self.clone())
}
pub fn jobs(&self) -> jobs::Client {
jobs::Client(self.clone())
}
pub fn operation(&self) -> operation::Client {
operation::Client(self.clone())
}
pub fn operations(&self) -> operations::Client {
operations::Client(self.clone())
}
pub fn private_endpoint(&self) -> private_endpoint::Client {
private_endpoint::Client(self.clone())
}
pub fn private_endpoint_connection(&self) -> private_endpoint_connection::Client {
private_endpoint_connection::Client(self.clone())
}
pub fn protectable_containers(&self) -> protectable_containers::Client {
protectable_containers::Client(self.clone())
}
pub fn protected_item_operation_results(&self) -> protected_item_operation_results::Client {
protected_item_operation_results::Client(self.clone())
}
pub fn protected_item_operation_statuses(&self) -> protected_item_operation_statuses::Client {
protected_item_operation_statuses::Client(self.clone())
}
pub fn protected_items(&self) -> protected_items::Client {
protected_items::Client(self.clone())
}
pub fn protection_container_operation_results(&self) -> protection_container_operation_results::Client {
protection_container_operation_results::Client(self.clone())
}
pub fn protection_container_refresh_operation_results(&self) -> protection_container_refresh_operation_results::Client {
protection_container_refresh_operation_results::Client(self.clone())
}
pub fn protection_containers(&self) -> protection_containers::Client {
protection_containers::Client(self.clone())
}
pub fn protection_intent(&self) -> protection_intent::Client {
protection_intent::Client(self.clone())
}
pub fn protection_policies(&self) -> protection_policies::Client {
protection_policies::Client(self.clone())
}
pub fn protection_policy_operation_results(&self) -> protection_policy_operation_results::Client {
protection_policy_operation_results::Client(self.clone())
}
pub fn protection_policy_operation_statuses(&self) -> protection_policy_operation_statuses::Client {
protection_policy_operation_statuses::Client(self.clone())
}
pub fn recovery_points(&self) -> recovery_points::Client {
recovery_points::Client(self.clone())
}
pub fn recovery_points_recommended_for_move(&self) -> recovery_points_recommended_for_move::Client {
recovery_points_recommended_for_move::Client(self.clone())
}
pub fn resource_guard_proxies(&self) -> resource_guard_proxies::Client {
resource_guard_proxies::Client(self.clone())
}
pub fn resource_guard_proxy(&self) -> resource_guard_proxy::Client {
resource_guard_proxy::Client(self.clone())
}
pub fn restores(&self) -> restores::Client {
restores::Client(self.clone())
}
pub fn security_pi_ns(&self) -> security_pi_ns::Client {
security_pi_ns::Client(self.clone())
}
pub fn validate_operation(&self) -> validate_operation::Client {
validate_operation::Client(self.clone())
}
pub fn validate_operation_results(&self) -> validate_operation_results::Client {
validate_operation_results::Client(self.clone())
}
pub fn validate_operation_statuses(&self) -> validate_operation_statuses::Client {
validate_operation_statuses::Client(self.clone())
}
}
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
#[error(transparent)]
BackupResourceStorageConfigsNonCrr_Get(#[from] backup_resource_storage_configs_non_crr::get::Error),
#[error(transparent)]
BackupResourceStorageConfigsNonCrr_Update(#[from] backup_resource_storage_configs_non_crr::update::Error),
#[error(transparent)]
BackupResourceStorageConfigsNonCrr_Patch(#[from] backup_resource_storage_configs_non_crr::patch::Error),
#[error(transparent)]
ProtectionIntent_Validate(#[from] protection_intent::validate::Error),
#[error(transparent)]
BackupStatus_Get(#[from] backup_status::get::Error),
#[error(transparent)]
FeatureSupport_Validate(#[from] feature_support::validate::Error),
#[error(transparent)]
ProtectionIntent_Get(#[from] protection_intent::get::Error),
#[error(transparent)]
ProtectionIntent_CreateOrUpdate(#[from] protection_intent::create_or_update::Error),
#[error(transparent)]
ProtectionIntent_Delete(#[from] protection_intent::delete::Error),
#[error(transparent)]
BackupProtectionIntent_List(#[from] backup_protection_intent::list::Error),
#[error(transparent)]
BackupUsageSummaries_List(#[from] backup_usage_summaries::list::Error),
#[error(transparent)]
Operations_List(#[from] operations::list::Error),
#[error(transparent)]
BackupResourceVaultConfigs_Get(#[from] backup_resource_vault_configs::get::Error),
#[error(transparent)]
BackupResourceVaultConfigs_Put(#[from] backup_resource_vault_configs::put::Error),
#[error(transparent)]
BackupResourceVaultConfigs_Update(#[from] backup_resource_vault_configs::update::Error),
#[error(transparent)]
BackupResourceEncryptionConfigs_Get(#[from] backup_resource_encryption_configs::get::Error),
#[error(transparent)]
BackupResourceEncryptionConfigs_Update(#[from] backup_resource_encryption_configs::update::Error),
#[error(transparent)]
PrivateEndpointConnection_Get(#[from] private_endpoint_connection::get::Error),
#[error(transparent)]
PrivateEndpointConnection_Put(#[from] private_endpoint_connection::put::Error),
#[error(transparent)]
PrivateEndpointConnection_Delete(#[from] private_endpoint_connection::delete::Error),
#[error(transparent)]
PrivateEndpoint_GetOperationStatus(#[from] private_endpoint::get_operation_status::Error),
#[error(transparent)]
GetOperationStatus(#[from] get_operation_status::Error),
#[error(transparent)]
BmsPrepareDataMove(#[from] bms_prepare_data_move::Error),
#[error(transparent)]
BmsPrepareDataMoveOperationResult_Get(#[from] bms_prepare_data_move_operation_result::get::Error),
#[error(transparent)]
BmsTriggerDataMove(#[from] bms_trigger_data_move::Error),
#[error(transparent)]
ProtectedItems_Get(#[from] protected_items::get::Error),
#[error(transparent)]
ProtectedItems_CreateOrUpdate(#[from] protected_items::create_or_update::Error),
#[error(transparent)]
ProtectedItems_Delete(#[from] protected_items::delete::Error),
#[error(transparent)]
ProtectedItemOperationResults_Get(#[from] protected_item_operation_results::get::Error),
#[error(transparent)]
RecoveryPoints_List(#[from] recovery_points::list::Error),
#[error(transparent)]
RecoveryPoints_Get(#[from] recovery_points::get::Error),
#[error(transparent)]
Restores_Trigger(#[from] restores::trigger::Error),
#[error(transparent)]
BackupPolicies_List(#[from] backup_policies::list::Error),
#[error(transparent)]
ProtectionPolicies_Get(#[from] protection_policies::get::Error),
#[error(transparent)]
ProtectionPolicies_CreateOrUpdate(#[from] protection_policies::create_or_update::Error),
#[error(transparent)]
ProtectionPolicies_Delete(#[from] protection_policies::delete::Error),
#[error(transparent)]
ProtectionPolicyOperationResults_Get(#[from] protection_policy_operation_results::get::Error),
#[error(transparent)]
BackupJobs_List(#[from] backup_jobs::list::Error),
#[error(transparent)]
JobDetails_Get(#[from] job_details::get::Error),
#[error(transparent)]
JobCancellations_Trigger(#[from] job_cancellations::trigger::Error),
#[error(transparent)]
JobOperationResults_Get(#[from] job_operation_results::get::Error),
#[error(transparent)]
ExportJobsOperationResults_Get(#[from] export_jobs_operation_results::get::Error),
#[error(transparent)]
Jobs_Export(#[from] jobs::export::Error),
#[error(transparent)]
BackupProtectedItems_List(#[from] backup_protected_items::list::Error),
#[error(transparent)]
Operation_Validate(#[from] operation::validate::Error),
#[error(transparent)]
ValidateOperation_Trigger(#[from] validate_operation::trigger::Error),
#[error(transparent)]
ValidateOperationResults_Get(#[from] validate_operation_results::get::Error),
#[error(transparent)]
ValidateOperationStatuses_Get(#[from] validate_operation_statuses::get::Error),
#[error(transparent)]
BackupEngines_List(#[from] backup_engines::list::Error),
#[error(transparent)]
BackupEngines_Get(#[from] backup_engines::get::Error),
#[error(transparent)]
ProtectionContainerRefreshOperationResults_Get(#[from] protection_container_refresh_operation_results::get::Error),
#[error(transparent)]
ProtectableContainers_List(#[from] protectable_containers::list::Error),
#[error(transparent)]
ProtectionContainers_Get(#[from] protection_containers::get::Error),
#[error(transparent)]
ProtectionContainers_Register(#[from] protection_containers::register::Error),
#[error(transparent)]
ProtectionContainers_Unregister(#[from] protection_containers::unregister::Error),
#[error(transparent)]
ProtectionContainers_Inquire(#[from] protection_containers::inquire::Error),
#[error(transparent)]
BackupWorkloadItems_List(#[from] backup_workload_items::list::Error),
#[error(transparent)]
ProtectionContainerOperationResults_Get(#[from] protection_container_operation_results::get::Error),
#[error(transparent)]
Backups_Trigger(#[from] backups::trigger::Error),
#[error(transparent)]
ProtectedItemOperationStatuses_Get(#[from] protected_item_operation_statuses::get::Error),
#[error(transparent)]
ItemLevelRecoveryConnections_Provision(#[from] item_level_recovery_connections::provision::Error),
#[error(transparent)]
ItemLevelRecoveryConnections_Revoke(#[from] item_level_recovery_connections::revoke::Error),
#[error(transparent)]
ProtectionContainers_Refresh(#[from] protection_containers::refresh::Error),
#[error(transparent)]
BackupOperationResults_Get(#[from] backup_operation_results::get::Error),
#[error(transparent)]
BackupOperationStatuses_Get(#[from] backup_operation_statuses::get::Error),
#[error(transparent)]
ProtectionPolicyOperationStatuses_Get(#[from] protection_policy_operation_statuses::get::Error),
#[error(transparent)]
BackupProtectableItems_List(#[from] backup_protectable_items::list::Error),
#[error(transparent)]
BackupProtectionContainers_List(#[from] backup_protection_containers::list::Error),
#[error(transparent)]
SecurityPiNs_Get(#[from] security_pi_ns::get::Error),
#[error(transparent)]
MoveRecoveryPoint(#[from] move_recovery_point::Error),
#[error(transparent)]
RecoveryPointsRecommendedForMove_List(#[from] recovery_points_recommended_for_move::list::Error),
#[error(transparent)]
ResourceGuardProxies_Get(#[from] resource_guard_proxies::get::Error),
#[error(transparent)]
ResourceGuardProxy_Get(#[from] resource_guard_proxy::get::Error),
#[error(transparent)]
ResourceGuardProxy_Put(#[from] resource_guard_proxy::put::Error),
#[error(transparent)]
ResourceGuardProxy_Delete(#[from] resource_guard_proxy::delete::Error),
#[error(transparent)]
ResourceGuardProxy_UnlockDelete(#[from] resource_guard_proxy::unlock_delete::Error),
}
pub mod backup_resource_storage_configs_non_crr {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn update(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::BackupResourceConfigResource>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
pub fn patch(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::BackupResourceConfigResource>,
) -> patch::Builder {
patch::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::BackupResourceConfigResource, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupstorageconfig/vaultstorageconfig" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BackupResourceConfigResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::BackupResourceConfigResource,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::BackupResourceConfigResource, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupstorageconfig/vaultstorageconfig" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BackupResourceConfigResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod patch {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::BackupResourceConfigResource,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupstorageconfig/vaultstorageconfig" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod protection_intent {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "It will validate followings\r\n1. Vault capacity\r\n2. VM is already protected\r\n3. Any VM related configuration passed in properties."]
pub fn validate(
&self,
azure_region: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::PreValidateEnableBackupRequest>,
) -> validate::Builder {
validate::Builder {
client: self.0.clone(),
azure_region: azure_region.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
intent_object_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
intent_object_name: intent_object_name.into(),
}
}
pub fn create_or_update(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
intent_object_name: impl Into<String>,
parameters: impl Into<models::ProtectionIntentResource>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
intent_object_name: intent_object_name.into(),
parameters: parameters.into(),
}
}
pub fn delete(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
intent_object_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
intent_object_name: intent_object_name.into(),
}
}
}
pub mod validate {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) azure_region: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::PreValidateEnableBackupRequest,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::PreValidateEnableBackupResponse, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/Subscriptions/{}/providers/Microsoft.RecoveryServices/locations/{}/backupPreValidateProtection",
self.client.endpoint(),
&self.subscription_id,
&self.azure_region
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::PreValidateEnableBackupResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) intent_object_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ProtectionIntentResource, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/backupProtectionIntent/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name , & self . fabric_name , & self . intent_object_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectionIntentResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) intent_object_name: String,
pub(crate) parameters: models::ProtectionIntentResource,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ProtectionIntentResource, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/backupProtectionIntent/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name , & self . fabric_name , & self . intent_object_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectionIntentResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) intent_object_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/backupProtectionIntent/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name , & self . fabric_name , & self . intent_object_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod backup_status {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Get the container backup status"]
pub fn get(
&self,
azure_region: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::BackupStatusRequest>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
azure_region: azure_region.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) azure_region: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::BackupStatusRequest,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BackupStatusResponse, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/Subscriptions/{}/providers/Microsoft.RecoveryServices/locations/{}/backupStatus",
self.client.endpoint(),
&self.subscription_id,
&self.azure_region
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BackupStatusResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod feature_support {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "It will validate if given feature with resource properties is supported in service"]
pub fn validate(
&self,
azure_region: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::FeatureSupportRequest>,
) -> validate::Builder {
validate::Builder {
client: self.0.clone(),
azure_region: azure_region.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
}
pub mod validate {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) azure_region: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::FeatureSupportRequest,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::AzureVmResourceFeatureSupportResponse, Error>>
{
Box::pin(async move {
let url_str = &format!(
"{}/Subscriptions/{}/providers/Microsoft.RecoveryServices/locations/{}/backupValidateFeatures",
self.client.endpoint(),
&self.subscription_id,
&self.azure_region
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AzureVmResourceFeatureSupportResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod backup_protection_intent {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
filter: None,
skip_token: None,
}
}
}
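    // Illustrative usage sketch (not generated code): the optional `$filter` and `$skipToken`
    // query parameters are set through the builder before awaiting. The
    // `backup_protection_intent()` accessor on the root `Client` is assumed here; the vault,
    // resource group and filter values are placeholders.
    //
    //     let intents = client
    //         .backup_protection_intent()
    //         .list("<vault-name>", "<resource-group>", "<subscription-id>")
    //         .filter("backupManagementType eq 'AzureWorkload'")
    //         .into_future()
    //         .await?;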
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) filter: Option<String>,
pub(crate) skip_token: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn skip_token(mut self, skip_token: impl Into<String>) -> Self {
self.skip_token = Some(skip_token.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ProtectionIntentResourceList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupProtectionIntents",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = &self.skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectionIntentResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod backup_usage_summaries {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
filter: None,
skip_token: None,
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) filter: Option<String>,
pub(crate) skip_token: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn skip_token(mut self, skip_token: impl Into<String>) -> Self {
self.skip_token = Some(skip_token.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BackupManagementUsageList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupUsageSummaries",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = &self.skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BackupManagementUsageList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod operations {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self) -> list::Builder {
list::Builder { client: self.0.clone() }
}
}
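    // Illustrative usage sketch (not generated code): listing the operations exposed by the
    // Microsoft.RecoveryServices provider needs no parameters beyond a configured client.
    // The `operations()` accessor on the root `Client` is assumed here.
    //
    //     let discovery = client.operations().list().into_future().await?;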
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ClientDiscoveryResponse, Error>> {
Box::pin(async move {
let url_str = &format!("{}/providers/Microsoft.RecoveryServices/operations", self.client.endpoint(),);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ClientDiscoveryResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod backup_resource_vault_configs {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn put(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::BackupResourceVaultConfigResource>,
) -> put::Builder {
put::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
pub fn update(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::BackupResourceVaultConfigResource>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
}
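    // Illustrative usage sketch (not generated code): `update` issues a PATCH and, on a
    // non-success status, surfaces the service error as `Error::DefaultResponse` carrying a
    // typed `NewErrorResponse`. The `backup_resource_vault_configs()` accessor and the
    // `vault_config` value below are assumptions.
    //
    //     match client
    //         .backup_resource_vault_configs()
    //         .update("<vault-name>", "<resource-group>", "<subscription-id>", vault_config)
    //         .into_future()
    //         .await
    //     {
    //         Ok(updated) => { /* use the returned BackupResourceVaultConfigResource */ }
    //         Err(update::Error::DefaultResponse { status_code, value }) => {
    //             eprintln!("service returned {}: {:?}", status_code, value);
    //         }
    //         Err(other) => { /* transport or serialization failure */ }
    //     }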
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::BackupResourceVaultConfigResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupconfig/vaultconfig",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BackupResourceVaultConfigResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod put {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::BackupResourceVaultConfigResource,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::BackupResourceVaultConfigResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupconfig/vaultconfig",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BackupResourceVaultConfigResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::BackupResourceVaultConfigResource,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::BackupResourceVaultConfigResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupconfig/vaultconfig",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BackupResourceVaultConfigResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod backup_resource_encryption_configs {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn update(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::BackupResourceEncryptionConfigResource>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::BackupResourceEncryptionConfigExtendedResource, Error>>
{
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupEncryptionConfigs/backupResourceEncryptionConfig",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BackupResourceEncryptionConfigExtendedResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::BackupResourceEncryptionConfigResource,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupEncryptionConfigs/backupResourceEncryptionConfig",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod private_endpoint_connection {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
private_endpoint_connection_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
private_endpoint_connection_name: private_endpoint_connection_name.into(),
}
}
pub fn put(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
private_endpoint_connection_name: impl Into<String>,
parameters: impl Into<models::PrivateEndpointConnectionResource>,
) -> put::Builder {
put::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
private_endpoint_connection_name: private_endpoint_connection_name.into(),
parameters: parameters.into(),
}
}
pub fn delete(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
private_endpoint_connection_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
private_endpoint_connection_name: private_endpoint_connection_name.into(),
}
}
}
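    // Illustrative usage sketch (not generated code): `delete` behaves as a long-running
    // operation, so the typed `Response` distinguishes 200/202/204; a 202 means the deletion
    // was accepted and is still in progress. The `private_endpoint_connection()` accessor and
    // the identifiers below are placeholders.
    //
    //     match client
    //         .private_endpoint_connection()
    //         .delete("<vault-name>", "<resource-group>", "<subscription-id>", "<connection-name>")
    //         .into_future()
    //         .await?
    //     {
    //         delete::Response::Accepted202 => { /* poll private_endpoint::get_operation_status */ }
    //         delete::Response::Ok200 | delete::Response::NoContent204 => { /* already removed */ }
    //     }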
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) private_endpoint_connection_name: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::PrivateEndpointConnectionResource, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/privateEndpointConnections/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.private_endpoint_connection_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::PrivateEndpointConnectionResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod put {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::PrivateEndpointConnectionResource),
Created201(models::PrivateEndpointConnectionResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) private_endpoint_connection_name: String,
pub(crate) parameters: models::PrivateEndpointConnectionResource,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/privateEndpointConnections/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.private_endpoint_connection_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::PrivateEndpointConnectionResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::PrivateEndpointConnectionResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) private_endpoint_connection_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/privateEndpointConnections/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.private_endpoint_connection_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod private_endpoint {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets the operation status for a private endpoint connection."]
pub fn get_operation_status(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
private_endpoint_connection_name: impl Into<String>,
operation_id: impl Into<String>,
) -> get_operation_status::Builder {
get_operation_status::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
private_endpoint_connection_name: private_endpoint_connection_name.into(),
operation_id: operation_id.into(),
}
}
}
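    // Illustrative usage sketch (not generated code): after a 202 from the private endpoint
    // connection PUT/DELETE, the operation id (typically taken from the Azure-AsyncOperation
    // or Location response header) can be polled until the returned `OperationStatus` reports
    // a terminal state. The accessor and identifiers below are placeholders.
    //
    //     let status = client
    //         .private_endpoint()
    //         .get_operation_status(
    //             "<vault-name>",
    //             "<resource-group>",
    //             "<subscription-id>",
    //             "<connection-name>",
    //             "<operation-id>",
    //         )
    //         .into_future()
    //         .await?;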
pub mod get_operation_status {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) private_endpoint_connection_name: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationStatus, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/privateEndpointConnections/{}/operationsStatus/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.private_endpoint_connection_name,
                        &self.operation_id
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationStatus =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
impl Client {
pub fn get_operation_status(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
operation_id: impl Into<String>,
) -> get_operation_status::Builder {
get_operation_status::Builder {
client: self.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
operation_id: operation_id.into(),
}
}
pub fn bms_prepare_data_move(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::PrepareDataMoveRequest>,
) -> bms_prepare_data_move::Builder {
bms_prepare_data_move::Builder {
client: self.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
pub fn bms_trigger_data_move(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::TriggerDataMoveRequest>,
) -> bms_trigger_data_move::Builder {
bms_trigger_data_move::Builder {
client: self.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
#[doc = "Move recovery point from one datastore to another store."]
pub fn move_recovery_point(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
recovery_point_id: impl Into<String>,
parameters: impl Into<models::MoveRpAcrossTiersRequest>,
) -> move_recovery_point::Builder {
move_recovery_point::Builder {
client: self.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
recovery_point_id: recovery_point_id.into(),
parameters: parameters.into(),
}
}
}
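// Illustrative usage sketch (not generated code): a vault-level data move is prepared on the
// source vault and then triggered on the target vault; both calls can return `Accepted202`,
// in which case `get_operation_status` on this client can be used to poll for completion.
// The vault names, resource groups and request bodies below are placeholders.
//
//     let prepared = client
//         .bms_prepare_data_move("<source-vault>", "<resource-group>", "<subscription-id>", prepare_request)
//         .into_future()
//         .await?;
//     let triggered = client
//         .bms_trigger_data_move("<target-vault>", "<target-resource-group>", "<subscription-id>", trigger_request)
//         .into_future()
//         .await?;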
pub mod get_operation_status {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationStatus, Error>> {
Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupstorageconfig/vaultstorageconfig/operationStatus/{}",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vault_name,
                    &self.operation_id
                );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationStatus =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod bms_prepare_data_move {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::PrepareDataMoveRequest,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupstorageconfig/vaultstorageconfig/prepareDataMove",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vault_name
                );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod bms_trigger_data_move {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::TriggerDataMoveRequest,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupstorageconfig/vaultstorageconfig/triggerDataMove",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vault_name
                );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
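/// Moves a recovery point across tiers for a protected item via
/// POST .../protectedItems/{protectedItemName}/recoveryPoints/{recoveryPointId}/move;
/// success is 202 Accepted with an empty body.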
pub mod move_recovery_point {
use super::models;
    #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) recovery_point_id: String,
pub(crate) parameters: models::MoveRpAcrossTiersRequest,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
                let url_str = &format!(
                    "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPoints/{}/move",
                    self.client.endpoint(),
                    &self.subscription_id,
                    &self.resource_group_name,
                    &self.vault_name,
                    &self.fabric_name,
                    &self.container_name,
                    &self.protected_item_name,
                    &self.recovery_point_id
                );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
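/// Fetches the result of a prepare-data-move operation via
/// GET .../backupstorageconfig/vaultstorageconfig/operationResults/{operationId}.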
pub mod bms_prepare_data_move_operation_result {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::VaultStorageConfigOperationResultResponse),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::NewErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupstorageconfig/vaultstorageconfig/operationResults/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.operation_id
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::VaultStorageConfigOperationResultResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::NewErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
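/// Operations on backup protected items: GET, PUT (create_or_update) and DELETE on
/// .../backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}.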
pub mod protected_items {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
filter: None,
}
}
pub fn create_or_update(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
parameters: impl Into<models::ProtectedItemResource>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
parameters: parameters.into(),
}
}
pub fn delete(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) filter: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ProtectedItemResource, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectedItemResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::ProtectedItemResource),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) parameters: models::ProtectedItemResource,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectedItemResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
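/// Polls the result of an asynchronous protected-item operation via
/// GET .../protectedItems/{protectedItemName}/operationResults/{operationId}.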
pub mod protected_item_operation_results {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::ProtectedItemResource),
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/operationResults/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name,
                        &self.operation_id
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectedItemResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
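/// Lists and fetches recovery points for a protected item
/// (GET .../protectedItems/{protectedItemName}/recoveryPoints[/{recoveryPointId}]).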
pub mod recovery_points {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
filter: None,
}
}
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
recovery_point_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
recovery_point_id: recovery_point_id.into(),
}
}
}
pub mod list {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) filter: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RecoveryPointResourceList, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPoints",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RecoveryPointResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) recovery_point_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RecoveryPointResource, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPoints/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name,
                        &self.recovery_point_id
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RecoveryPointResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
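/// Triggers a restore from a recovery point via
/// POST .../recoveryPoints/{recoveryPointId}/restore; success is 202 Accepted with an empty body.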
pub mod restores {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn trigger(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
recovery_point_id: impl Into<String>,
parameters: impl Into<models::RestoreRequestResource>,
) -> trigger::Builder {
trigger::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
recovery_point_id: recovery_point_id.into(),
parameters: parameters.into(),
}
}
}
pub mod trigger {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) recovery_point_id: String,
pub(crate) parameters: models::RestoreRequestResource,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPoints/{}/restore",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name,
                        &self.recovery_point_id
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
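/// Lists backup policies in a vault (GET .../backupPolicies), with an optional `$filter`.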
pub mod backup_policies {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
filter: None,
}
}
}
pub mod list {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) filter: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ProtectionPolicyResourceList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectionPolicyResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
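/// Manages individual protection policies: GET, PUT (create_or_update) and DELETE
/// on .../backupPolicies/{policyName}.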
pub mod protection_policies {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
policy_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
policy_name: policy_name.into(),
}
}
pub fn create_or_update(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
policy_name: impl Into<String>,
parameters: impl Into<models::ProtectionPolicyResource>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
policy_name: policy_name.into(),
parameters: parameters.into(),
}
}
pub fn delete(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
policy_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
policy_name: policy_name.into(),
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) policy_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ProtectionPolicyResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name,
&self.policy_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectionPolicyResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::ProtectionPolicyResource),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) policy_name: String,
pub(crate) parameters: models::ProtectionPolicyResource,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name,
&self.policy_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectionPolicyResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) policy_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name,
&self.policy_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
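/// Fetches the result of a protection-policy operation via
/// GET .../backupPolicies/{policyName}/operationResults/{operationId}.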
pub mod protection_policy_operation_results {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
policy_name: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
policy_name: policy_name.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) policy_name: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ProtectionPolicyResource, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies/{}/operationResults/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.policy_name,
                        &self.operation_id
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectionPolicyResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
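// Lists backup jobs in a vault (GET .../backupJobs) with optional `$filter` and
// `$skipToken` query parameters. A usage sketch, assuming the top-level `Client`
// exposes a `backup_jobs()` accessor like the other generated operation groups,
// and using an illustrative filter expression:
//
//     let jobs = client
//         .backup_jobs()
//         .list(vault_name, resource_group_name, subscription_id)
//         .filter("status eq 'InProgress'")
//         .into_future()
//         .await?;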
pub mod backup_jobs {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
filter: None,
skip_token: None,
}
}
}
pub mod list {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) filter: Option<String>,
pub(crate) skip_token: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn skip_token(mut self, skip_token: impl Into<String>) -> Self {
self.skip_token = Some(skip_token.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::JobResourceList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobs",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = &self.skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::JobResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
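/// Fetches details for a single backup job (GET .../backupJobs/{jobName}).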
pub mod job_details {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
job_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
job_name: job_name.into(),
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) job_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::JobResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobs/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name,
&self.job_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::JobResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod job_cancellations {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn trigger(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
job_name: impl Into<String>,
) -> trigger::Builder {
trigger::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
job_name: job_name.into(),
}
}
}
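        // Usage sketch (hypothetical names): triggering a cancellation returns `Ok(())` on
        // HTTP 202; completion is observed separately via `job_operation_results::Client::get`.
        //
        //     client
        //         .trigger("my-vault", "my-rg", subscription_id, "job-name")
        //         .into_future()
        //         .await?;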
pub mod trigger {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) job_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobs/{}/cancel",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name,
&self.job_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod job_operation_results {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
job_name: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
job_name: job_name.into(),
operation_id: operation_id.into(),
}
}
}
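        // Usage sketch (hypothetical names): polls the operation started by a job action.
        // The response distinguishes 200/202/204, so callers typically retry while the
        // result is still `Accepted202`.
        //
        //     match client
        //         .get("my-vault", "my-rg", subscription_id, "job-name", "operation-id")
        //         .into_future()
        //         .await?
        //     {
        //         get::Response::Ok200 | get::Response::NoContent204 => { /* finished */ }
        //         get::Response::Accepted202 => { /* still running; retry later */ }
        //     }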
pub mod get {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) job_name: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobs/{}/operationResults/{}",
                            self.client.endpoint(),
                            &self.subscription_id,
                            &self.resource_group_name,
                            &self.vault_name,
                            &self.job_name,
                            &self.operation_id
                        );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod export_jobs_operation_results {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
            pub fn get(

&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::OperationResultInfoBaseResource),
Accepted202(models::OperationResultInfoBaseResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobs/operationResults/{}",
                            self.client.endpoint(),
                            &self.subscription_id,
                            &self.resource_group_name,
                            &self.vault_name,
                            &self.operation_id
                        );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationResultInfoBaseResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationResultInfoBaseResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod jobs {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn export(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> export::Builder {
export::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
filter: None,
}
}
}
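        // Usage sketch (hypothetical names): `export` only starts the job export (HTTP 202
        // returns `Ok(())`); the export result is fetched afterwards through
        // `export_jobs_operation_results::Client::get` with the operation id.
        //
        //     client
        //         .export("my-vault", "my-rg", subscription_id)
        //         .filter("backupManagementType eq 'AzureIaasVM'")
        //         .into_future()
        //         .await?;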
pub mod export {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) filter: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobsExport",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod backup_protected_items {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
filter: None,
skip_token: None,
}
}
}
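        // Usage sketch (hypothetical names): lists protected items in the vault, optionally
        // narrowed with OData `$filter` and paged with `$skipToken`.
        //
        //     let items: models::ProtectedItemResourceList = client
        //         .list("my-vault", "my-rg", subscription_id)
        //         .filter("backupManagementType eq 'AzureIaasVM'")
        //         .into_future()
        //         .await?;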
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) filter: Option<String>,
pub(crate) skip_token: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn skip_token(mut self, skip_token: impl Into<String>) -> Self {
self.skip_token = Some(skip_token.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ProtectedItemResourceList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupProtectedItems",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = &self.skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectedItemResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod operation {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn validate(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::ValidateOperationRequest>,
) -> validate::Builder {
validate::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
}
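        // Usage sketch (hypothetical names): synchronously validates an operation request and
        // returns `models::ValidateOperationsResponse`.
        //
        //     let response = client
        //         .validate("my-vault", "my-rg", subscription_id, validate_request)
        //         .into_future()
        //         .await?;
        //
        // where `validate_request` is a `models::ValidateOperationRequest` built by the caller.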
pub mod validate {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::ValidateOperationRequest,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ValidateOperationsResponse, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupValidateOperation",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ValidateOperationsResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod validate_operation {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn trigger(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::ValidateOperationRequest>,
) -> trigger::Builder {
trigger::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
}
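        // Usage sketch (hypothetical names): the asynchronous validation flow. `trigger`
        // returns `Ok(())` on HTTP 202; progress is read from
        // `validate_operation_statuses::Client::get` and the final outcome from
        // `validate_operation_results::Client::get`, both keyed by an operation id.
        //
        //     client
        //         .trigger("my-vault", "my-rg", subscription_id, validate_request)
        //         .into_future()
        //         .await?;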
pub mod trigger {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::ValidateOperationRequest,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupTriggerValidateOperation",
                            self.client.endpoint(),
                            &self.subscription_id,
                            &self.resource_group_name,
                            &self.vault_name
                        );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod validate_operation_results {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::ValidateOperationsResponse),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupValidateOperationResults/{}",
                            self.client.endpoint(),
                            &self.subscription_id,
                            &self.resource_group_name,
                            &self.vault_name,
                            &self.operation_id
                        );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ValidateOperationsResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod validate_operation_statuses {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationStatus, Error>> {
Box::pin(async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupValidateOperationsStatuses/{}",
                            self.client.endpoint(),
                            &self.subscription_id,
                            &self.resource_group_name,
                            &self.vault_name,
                            &self.operation_id
                        );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationStatus =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod backup_engines {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
filter: None,
skip_token: None,
}
}
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
backup_engine_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
backup_engine_name: backup_engine_name.into(),
filter: None,
skip_token: None,
}
}
}
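        // Usage sketch (hypothetical names): `list` enumerates the backup engines registered
        // to the vault; `get` fetches a single engine by name. Both accept the optional
        // `$filter`/`$skipToken` query parameters.
        //
        //     let engines = client.list("my-vault", "my-rg", subscription_id).into_future().await?;
        //     let engine = client
        //         .get("my-vault", "my-rg", subscription_id, "engine-name")
        //         .into_future()
        //         .await?;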
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) filter: Option<String>,
pub(crate) skip_token: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn skip_token(mut self, skip_token: impl Into<String>) -> Self {
self.skip_token = Some(skip_token.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::BackupEngineBaseResourceList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupEngines",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = &self.skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BackupEngineBaseResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) backup_engine_name: String,
pub(crate) filter: Option<String>,
pub(crate) skip_token: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn skip_token(mut self, skip_token: impl Into<String>) -> Self {
self.skip_token = Some(skip_token.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BackupEngineBaseResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupEngines/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name,
&self.backup_engine_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = &self.skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BackupEngineBaseResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod protection_container_refresh_operation_results {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/operationResults/{}",
                            self.client.endpoint(),
                            &self.subscription_id,
                            &self.resource_group_name,
                            &self.vault_name,
                            &self.fabric_name,
                            &self.operation_id
                        );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod protectable_containers {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
filter: None,
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) filter: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ProtectableContainerResourceList, Error>> {
Box::pin(async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectableContainers",
                            self.client.endpoint(),
                            &self.subscription_id,
                            &self.resource_group_name,
                            &self.vault_name,
                            &self.fabric_name
                        );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectableContainerResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod protection_containers {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
}
}
pub fn register(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
parameters: impl Into<models::ProtectionContainerResource>,
) -> register::Builder {
register::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
parameters: parameters.into(),
}
}
pub fn unregister(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
) -> unregister::Builder {
unregister::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
}
}
#[doc = "Inquires all the protectable items under the given container."]
pub fn inquire(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
) -> inquire::Builder {
inquire::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
filter: None,
}
}
pub fn refresh(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
) -> refresh::Builder {
refresh::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
filter: None,
}
}
}
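        // Usage sketch (hypothetical names): container registration is asynchronous.
        // `register` may answer 200 with the registered container or 202 while the operation
        // is still in flight; `refresh` is likewise polled through the
        // `protection_container_refresh_operation_results` module.
        //
        //     match client
        //         .register("my-vault", "my-rg", subscription_id, "fabric", "container", container_resource)
        //         .into_future()
        //         .await?
        //     {
        //         register::Response::Ok200(container) => { /* registered */ }
        //         register::Response::Accepted202 => { /* poll until complete */ }
        //     }
        //
        // where `container_resource` is a `models::ProtectionContainerResource` built by the caller.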
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ProtectionContainerResource, Error>> {
Box::pin(async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}",
                            self.client.endpoint(),
                            &self.subscription_id,
                            &self.resource_group_name,
                            &self.vault_name,
                            &self.fabric_name,
                            &self.container_name
                        );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectionContainerResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod register {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::ProtectionContainerResource),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) parameters: models::ProtectionContainerResource,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}",
                            self.client.endpoint(),
                            &self.subscription_id,
                            &self.resource_group_name,
                            &self.vault_name,
                            &self.fabric_name,
                            &self.container_name
                        );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectionContainerResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
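    /// Unregisters the container by issuing a DELETE to the protection container URL; 200, 202
    /// and 204 are all treated as success and carry no body.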
pub mod unregister {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
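    /// Triggers an inquiry on the container via a POST to `.../protectionContainers/{containerName}/inquire`,
    /// optionally scoped with `$filter`; 202 Accepted is the only success status.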
pub mod inquire {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) filter: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/inquire",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
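    /// Refreshes the set of containers for the fabric via a POST to `.../backupFabrics/{fabricName}/refreshContainers`,
    /// optionally scoped with `$filter`; 202 Accepted is the only success status.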
pub mod refresh {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) filter: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/refreshContainers",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
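/// Lists workload items inside a protection container (GET `.../protectionContainers/{containerName}/items`),
/// supporting optional `$filter` and `$skipToken` query parameters.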
pub mod backup_workload_items {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
filter: None,
skip_token: None,
}
}
}
pub mod list {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) filter: Option<String>,
pub(crate) skip_token: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn skip_token(mut self, skip_token: impl Into<String>) -> Self {
self.skip_token = Some(skip_token.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::WorkloadItemResourceList, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/items",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = &self.skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::WorkloadItemResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
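/// Fetches the result of a protection container operation (GET `.../protectionContainers/{containerName}/operationResults/{operationId}`);
/// a 200 response carries the container resource, while 202 and 204 are returned without a body.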
pub mod protection_container_operation_results {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::ProtectionContainerResource),
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/operationResults/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.operation_id
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectionContainerResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
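/// Triggers an ad-hoc backup of a protected item (POST `.../protectedItems/{protectedItemName}/backup`);
/// 202 Accepted is the only success status.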
pub mod backups {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn trigger(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
parameters: impl Into<models::BackupRequestResource>,
) -> trigger::Builder {
trigger::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
parameters: parameters.into(),
}
}
}
pub mod trigger {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) parameters: models::BackupRequestResource,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/backup",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
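/// Gets the status of an operation on a protected item (GET `.../protectedItems/{protectedItemName}/operationsStatus/{operationId}`),
/// deserializing the body into an `OperationStatus`.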
pub mod protected_item_operation_statuses {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationStatus, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/operationsStatus/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name,
                        &self.operation_id
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationStatus =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
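/// Provisions and revokes instant item recovery for a recovery point (POST
/// `.../recoveryPoints/{recoveryPointId}/provisionInstantItemRecovery` and `.../revokeInstantItemRecovery`);
/// both operations expect 202 Accepted.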
pub mod item_level_recovery_connections {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn provision(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
recovery_point_id: impl Into<String>,
parameters: impl Into<models::IlrRequestResource>,
) -> provision::Builder {
provision::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
recovery_point_id: recovery_point_id.into(),
parameters: parameters.into(),
}
}
pub fn revoke(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
recovery_point_id: impl Into<String>,
) -> revoke::Builder {
revoke::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
recovery_point_id: recovery_point_id.into(),
}
}
}
pub mod provision {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) recovery_point_id: String,
pub(crate) parameters: models::IlrRequestResource,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPoints/{}/provisionInstantItemRecovery",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name,
                        &self.recovery_point_id
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod revoke {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) recovery_point_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPoints/{}/revokeInstantItemRecovery",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name,
                        &self.recovery_point_id
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
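/// Fetches the result of a vault-level backup operation (GET `.../backupOperationResults/{operationId}`);
/// 200, 202 and 204 are all success statuses and carry no body.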
pub mod backup_operation_results {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupOperationResults/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name,
&self.operation_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
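/// Gets the status of a vault-level backup operation (GET `.../backupOperations/{operationId}`),
/// returning an `OperationStatus`.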
pub mod backup_operation_statuses {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationStatus, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupOperations/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name,
&self.operation_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationStatus =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
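/// Gets the status of a backup policy operation (GET `.../backupPolicies/{policyName}/operations/{operationId}`),
/// returning an `OperationStatus`.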
pub mod protection_policy_operation_statuses {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
policy_name: impl Into<String>,
operation_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
policy_name: policy_name.into(),
operation_id: operation_id.into(),
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) policy_name: String,
pub(crate) operation_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationStatus, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies/{}/operations/{}",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.policy_name,
                        &self.operation_id
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationStatus =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
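/// Lists the items in the vault that can be protected (GET `.../backupProtectableItems`),
/// supporting optional `$filter` and `$skipToken` query parameters.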
pub mod backup_protectable_items {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
filter: None,
skip_token: None,
}
}
}
pub mod list {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) filter: Option<String>,
pub(crate) skip_token: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn skip_token(mut self, skip_token: impl Into<String>) -> Self {
self.skip_token = Some(skip_token.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::WorkloadProtectableItemResourceList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupProtectableItems",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = &self.skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::WorkloadProtectableItemResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
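/// Lists the protection containers registered to the vault (GET `.../backupProtectionContainers`),
/// supporting an optional `$filter` query parameter.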
pub mod backup_protection_containers {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
filter: None,
}
}
}
pub mod list {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) filter: Option<String>,
}
impl Builder {
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ProtectionContainerResourceList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupProtectionContainers",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProtectionContainerResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
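/// Retrieves the vault security PIN (POST `.../backupSecurityPIN`), optionally sending a
/// `SecurityPinBase` body, and returns the resulting `TokenInformation`.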
pub mod security_pi_ns {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
parameters: None,
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: Option<models::SecurityPinBase>,
}
impl Builder {
pub fn parameters(mut self, parameters: impl Into<models::SecurityPinBase>) -> Self {
self.parameters = Some(parameters.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::TokenInformation, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupSecurityPIN",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = if let Some(parameters) = &self.parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(Error::Serialize)?
} else {
azure_core::EMPTY_BODY
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::TokenInformation =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
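/// Lists the recovery points of a protected item that are recommended for move
/// (POST `.../recoveryPointsRecommendedForMove`), returning a `RecoveryPointResourceList`.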
pub mod recovery_points_recommended_for_move {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
fabric_name: impl Into<String>,
container_name: impl Into<String>,
protected_item_name: impl Into<String>,
parameters: impl Into<models::ListRecoveryPointsRecommendedForMoveRequest>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
fabric_name: fabric_name.into(),
container_name: container_name.into(),
protected_item_name: protected_item_name.into(),
parameters: parameters.into(),
}
}
}
pub mod list {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) fabric_name: String,
pub(crate) container_name: String,
pub(crate) protected_item_name: String,
pub(crate) parameters: models::ListRecoveryPointsRecommendedForMoveRequest,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RecoveryPointResourceList, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPointsRecommendedForMove",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.vault_name,
                        &self.fabric_name,
                        &self.container_name,
                        &self.protected_item_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RecoveryPointResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
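// Illustrative only (not part of the generated client): these builder modules are normally
// reached through an accessor on the crate's root `Client`; a hypothetical call, assuming such
// an accessor exists and `parameters: models::ListRecoveryPointsRecommendedForMoveRequest` has
// been built, might look like:
//
//     let recovery_points = client
//         .recovery_points_recommended_for_move()
//         .list("my-vault", "my-rg", "sub-id", "Azure", "my-container", "my-item", parameters)
//         .into_future()
//         .await?;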
pub mod resource_guard_proxies {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ResourceGuardProxyBaseResourceList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupResourceGuardProxies",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.vault_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ResourceGuardProxyBaseResourceList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod resource_guard_proxy {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
resource_guard_proxy_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
resource_guard_proxy_name: resource_guard_proxy_name.into(),
}
}
pub fn put(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
resource_guard_proxy_name: impl Into<String>,
) -> put::Builder {
put::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
resource_guard_proxy_name: resource_guard_proxy_name.into(),
}
}
pub fn delete(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
resource_guard_proxy_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
resource_guard_proxy_name: resource_guard_proxy_name.into(),
}
}
pub fn unlock_delete(
&self,
vault_name: impl Into<String>,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
resource_guard_proxy_name: impl Into<String>,
parameters: impl Into<models::UnlockDeleteRequest>,
) -> unlock_delete::Builder {
unlock_delete::Builder {
client: self.0.clone(),
vault_name: vault_name.into(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
resource_guard_proxy_name: resource_guard_proxy_name.into(),
parameters: parameters.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) resource_guard_proxy_name: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ResourceGuardProxyBaseResource, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupResourceGuardProxies/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name , & self . resource_guard_proxy_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ResourceGuardProxyBaseResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod put {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) resource_guard_proxy_name: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ResourceGuardProxyBaseResource, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupResourceGuardProxies/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name , & self . resource_guard_proxy_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ResourceGuardProxyBaseResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) resource_guard_proxy_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupResourceGuardProxies/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name , & self . resource_guard_proxy_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod unlock_delete {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) vault_name: String,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) resource_guard_proxy_name: String,
pub(crate) parameters: models::UnlockDeleteRequest,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::UnlockDeleteResponse, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupResourceGuardProxies/{}/unlockDelete" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vault_name , & self . resource_guard_proxy_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-01-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::UnlockDeleteResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
| get |
rpc_util.go | /*
*
* Copyright 2014 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"bytes"
"compress/gzip"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math"
"net/url"
"strings"
"sync"
"time"
"github.com/xiaotuancai/grpc/codes"
"github.com/xiaotuancai/grpc/credentials"
"github.com/xiaotuancai/grpc/encoding"
"github.com/xiaotuancai/grpc/encoding/proto"
"github.com/xiaotuancai/grpc/internal/transport"
"github.com/xiaotuancai/grpc/metadata"
"github.com/xiaotuancai/grpc/peer"
"github.com/xiaotuancai/grpc/stats"
"github.com/xiaotuancai/grpc/status"
"golang.org/x/net/context"
)
// Compressor defines the interface gRPC uses to compress a message.
//
// Deprecated: use package encoding.
type Compressor interface {
// Do compresses p into w.
Do(w io.Writer, p []byte) error
// Type returns the compression algorithm the Compressor uses.
Type() string
}
type gzipCompressor struct {
pool sync.Pool
}
// NewGZIPCompressor creates a Compressor based on GZIP.
//
// Deprecated: use package encoding/gzip.
func NewGZIPCompressor() Compressor {
c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
return c
}
// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
// of assuming DefaultCompression.
//
// The error returned will be nil if the level is valid.
//
// Deprecated: use package encoding/gzip.
func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
if level < gzip.DefaultCompression || level > gzip.BestCompression {
return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
}
return &gzipCompressor{
pool: sync.Pool{
New: func() interface{} {
w, err := gzip.NewWriterLevel(ioutil.Discard, level)
if err != nil {
panic(err)
}
return w
},
},
}, nil
}
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
z := c.pool.Get().(*gzip.Writer)
defer c.pool.Put(z)
z.Reset(w)
if _, err := z.Write(p); err != nil {
return err
}
return z.Close()
}
func (c *gzipCompressor) Type() string {
return "gzip"
}
// Decompressor defines the interface gRPC uses to decompress a message.
//
// Deprecated: use package encoding.
type Decompressor interface {
	// Do reads the data from r and uncompresses it.
Do(r io.Reader) ([]byte, error)
// Type returns the compression algorithm the Decompressor uses.
Type() string
}
type gzipDecompressor struct {
pool sync.Pool
}
// NewGZIPDecompressor creates a Decompressor based on GZIP.
//
// Deprecated: use package encoding/gzip.
func NewGZIPDecompressor() Decompressor {
return &gzipDecompressor{}
}
func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
var z *gzip.Reader
switch maybeZ := d.pool.Get().(type) {
case nil:
newZ, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
z = newZ
case *gzip.Reader:
z = maybeZ
if err := z.Reset(r); err != nil {
d.pool.Put(z)
return nil, err
}
}
defer func() {
z.Close()
d.pool.Put(z)
}()
return ioutil.ReadAll(z)
}
func (d *gzipDecompressor) Type() string {
return "gzip"
}
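// Illustrative only: the deprecated compressor/decompressor above are typically wired in at
// dial time, for example:
//
//	conn, err := grpc.Dial(target,
//		grpc.WithCompressor(grpc.NewGZIPCompressor()),
//		grpc.WithDecompressor(grpc.NewGZIPDecompressor()))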
// callInfo contains all related configuration and information about an RPC.
type callInfo struct {
compressorType string
failFast bool
stream *clientStream
maxReceiveMessageSize *int
maxSendMessageSize *int
creds credentials.PerRPCCredentials
contentSubtype string
codec baseCodec
maxRetryRPCBufferSize int
}
func defaultCallInfo() *callInfo {
return &callInfo{
failFast: true,
maxRetryRPCBufferSize: 256 * 1024, // 256KB
}
}
// CallOption configures a Call before it starts or extracts information from
// a Call after it completes.
type CallOption interface {
// before is called before the call is sent to any server. If before
// returns a non-nil error, the RPC fails with that error.
before(*callInfo) error
// after is called after the call has completed. after cannot return an
// error, so any failures should be reported via output parameters.
after(*callInfo)
}
// EmptyCallOption does not alter the Call configuration.
// It can be embedded in another structure to carry satellite data for use
// by interceptors.
type EmptyCallOption struct{}
func (EmptyCallOption) before(*callInfo) error { return nil }
func (EmptyCallOption) after(*callInfo) {}
// Header returns a CallOption that retrieves the header metadata
// for a unary RPC.
func Header(md *metadata.MD) CallOption {
return HeaderCallOption{HeaderAddr: md}
}
// HeaderCallOption is a CallOption for collecting response header metadata.
// The metadata field will be populated *after* the RPC completes.
// This is an EXPERIMENTAL API.
type HeaderCallOption struct {
HeaderAddr *metadata.MD
}
func (o HeaderCallOption) before(c *callInfo) error { return nil }
func (o HeaderCallOption) after(c *callInfo) {
if c.stream != nil {
*o.HeaderAddr, _ = c.stream.Header()
}
}
// Trailer returns a CallOption that retrieves the trailer metadata
// for a unary RPC.
func Trailer(md *metadata.MD) CallOption {
return TrailerCallOption{TrailerAddr: md}
}
// TrailerCallOption is a CallOption for collecting response trailer metadata.
// The metadata field will be populated *after* the RPC completes.
// This is an EXPERIMENTAL API.
type TrailerCallOption struct {
TrailerAddr *metadata.MD
}
func (o TrailerCallOption) before(c *callInfo) error { return nil }
func (o TrailerCallOption) after(c *callInfo) {
if c.stream != nil {
*o.TrailerAddr = c.stream.Trailer()
}
}
// Peer returns a CallOption that retrieves peer information for a unary RPC.
// The peer field will be populated *after* the RPC completes.
func Peer(p *peer.Peer) CallOption {
return PeerCallOption{PeerAddr: p}
}
// PeerCallOption is a CallOption for collecting the identity of the remote
// peer. The peer field will be populated *after* the RPC completes.
// This is an EXPERIMENTAL API.
type PeerCallOption struct {
PeerAddr *peer.Peer
}
func (o PeerCallOption) before(c *callInfo) error { return nil }
func (o PeerCallOption) after(c *callInfo) {
if c.stream != nil {
if x, ok := peer.FromContext(c.stream.Context()); ok {
*o.PeerAddr = *x
}
}
}
// FailFast configures the action to take when an RPC is attempted on broken
// connections or unreachable servers. If failFast is true, the RPC will fail
// immediately. Otherwise, the RPC client will block the call until a
// connection is available (or the call is canceled or times out) and will
// retry the call if it fails due to a transient error. gRPC will not retry if
// data was written to the wire unless the server indicates it did not process
// the data. Please refer to
// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
//
// By default, RPCs are "Fail Fast".
func FailFast(failFast bool) CallOption {
return FailFastCallOption{FailFast: failFast}
}
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
// fast or not.
// This is an EXPERIMENTAL API.
type FailFastCallOption struct {
FailFast bool
}
func (o FailFastCallOption) before(c *callInfo) error {
c.failFast = o.FailFast
return nil
}
func (o FailFastCallOption) after(c *callInfo) {}
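// Illustrative per-call usage (the client stub and method name are hypothetical):
//
//	resp, err := client.SomeUnaryRPC(ctx, req, grpc.FailFast(false)) // block until a connection is ready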
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
func MaxCallRecvMsgSize(s int) CallOption {
return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
}
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
// size the client can receive.
// This is an EXPERIMENTAL API.
type MaxRecvMsgSizeCallOption struct {
MaxRecvMsgSize int
}
func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
return nil
}
func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
func MaxCallSendMsgSize(s int) CallOption {
return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
}
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
// size the client can send.
// This is an EXPERIMENTAL API.
type MaxSendMsgSizeCallOption struct {
MaxSendMsgSize int
}
func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
c.maxSendMessageSize = &o.MaxSendMsgSize
return nil
}
func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
return PerRPCCredsCallOption{Creds: creds}
}
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
// credentials to use for the call.
// This is an EXPERIMENTAL API.
type PerRPCCredsCallOption struct {
Creds credentials.PerRPCCredentials
}
func (o PerRPCCredsCallOption) before(c *callInfo) error {
c.creds = o.Creds
return nil
}
func (o PerRPCCredsCallOption) after(c *callInfo) {}
// UseCompressor returns a CallOption which sets the compressor used when
// sending the request. If WithCompressor is also set, UseCompressor has
// higher priority.
//
// This API is EXPERIMENTAL.
func UseCompressor(name string) CallOption {
return CompressorCallOption{CompressorType: name}
}
// CompressorCallOption is a CallOption that indicates the compressor to use.
// This is an EXPERIMENTAL API.
type CompressorCallOption struct {
CompressorType string
}
func (o CompressorCallOption) before(c *callInfo) error {
c.compressorType = o.CompressorType
return nil
}
func (o CompressorCallOption) after(c *callInfo) {}
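// Illustrative usage (assumes a compressor named "gzip" has been registered with the encoding
// package, e.g. by importing the encoding/gzip subpackage; the stub below is hypothetical):
//
//	resp, err := client.SomeUnaryRPC(ctx, req, grpc.UseCompressor("gzip"))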
// CallContentSubtype returns a CallOption that will set the content-subtype
// for a call. For example, if content-subtype is "json", the Content-Type over
// the wire will be "application/grpc+json". The content-subtype is converted
// to lowercase before being included in Content-Type. See Content-Type on
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
// more details.
//
// If CallCustomCodec is not also used, the content-subtype will be used to
// look up the Codec to use in the registry controlled by RegisterCodec. See
// the documentation on RegisterCodec for details on registration. The lookup
// of content-subtype is case-insensitive. If no such Codec is found, the call
// will result in an error with code codes.Internal.
//
// If CallCustomCodec is also used, that Codec will be used for all request and
// response messages, with the content-subtype set to the given contentSubtype
// here for requests.
func CallContentSubtype(contentSubtype string) CallOption {
return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
}
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
// used for marshaling messages.
// This is an EXPERIMENTAL API.
type ContentSubtypeCallOption struct {
ContentSubtype string
}
func (o ContentSubtypeCallOption) before(c *callInfo) error {
c.contentSubtype = o.ContentSubtype
return nil
}
func (o ContentSubtypeCallOption) after(c *callInfo) {}
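// Illustrative usage (assumes a codec named "json" has been registered via RegisterCodec, so the
// wire Content-Type becomes "application/grpc+json"; the stub below is hypothetical):
//
//	resp, err := client.SomeUnaryRPC(ctx, req, grpc.CallContentSubtype("json"))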
// CallCustomCodec returns a CallOption that will set the given Codec to be
// used for all request and response messages for a call. The result of calling
// String() will be used as the content-subtype in a case-insensitive manner.
//
// See Content-Type on
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
// more details. Also see the documentation on RegisterCodec and
// CallContentSubtype for more details on the interaction between Codec and
// content-subtype.
//
// This function is provided for advanced users; prefer to use only
// CallContentSubtype to select a registered codec instead.
func CallCustomCodec(codec Codec) CallOption {
return CustomCodecCallOption{Codec: codec}
}
// CustomCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
// This is an EXPERIMENTAL API.
type CustomCodecCallOption struct {
Codec Codec
}
func (o CustomCodecCallOption) before(c *callInfo) error {
c.codec = o.Codec
return nil
}
func (o CustomCodecCallOption) after(c *callInfo) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
//
// This API is EXPERIMENTAL.
func MaxRetryRPCBufferSize(bytes int) CallOption {
return MaxRetryRPCBufferSizeCallOption{bytes}
}
// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
// memory to be used for caching this RPC for retry purposes.
// This is an EXPERIMENTAL API.
type MaxRetryRPCBufferSizeCallOption struct {
MaxRetryRPCBufferSize int
}
func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
return nil
}
func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
// The format of the payload: compressed or not?
type payloadFormat uint8
const (
compressionNone payloadFormat = 0 // no compression
compressionMade payloadFormat = 1 // compressed
)
// parser reads complete gRPC messages from the underlying reader.
type parser struct {
// r is the underlying reader.
// See the comment on recvMsg for the permissible
// error types.
r io.Reader
// The header of a gRPC message. Find more detail at
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
header [5]byte
}
// recvMsg reads a complete gRPC message from the stream.
//
// It returns the message and its payload (compression/encoding)
// format. The caller owns the returned msg memory.
//
// If there is an error, possible values are:
// * io.EOF, when no messages remain
// * io.ErrUnexpectedEOF
// * of type transport.ConnectionError
// * an error from the status package
// No other error values or types must be returned, which also means
// that the underlying io.Reader must not return an incompatible
// error.
func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
if _, err := p.r.Read(p.header[:]); err != nil {
return 0, nil, err
}
pf = payloadFormat(p.header[0])
length := binary.BigEndian.Uint32(p.header[1:])
if length == 0 {
return pf, nil, nil
}
if int64(length) > int64(maxInt) {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
}
if int(length) > maxReceiveMessageSize {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
// of making it for each message:
msg = make([]byte, int(length))
if _, err := p.r.Read(msg); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return 0, nil, err
}
return pf, msg, nil
}
// encode serializes msg and returns a buffer containing the message, or an
// error if it is too large to be transmitted by grpc. If msg is nil, it
// generates an empty message.
func encode(c baseCodec, msg interface{}) ([]byte, error) {
if msg == nil { // NOTE: typed nils will not be caught by this check
return nil, nil
}
b, err := c.Marshal(msg)
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
if uint(len(b)) > math.MaxUint32 {
return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
}
return b, nil
}
// compress returns the input bytes compressed by compressor or cp. If both
// compressors are nil, returns nil.
//
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
if compressor == nil && cp == nil {
return nil, nil
}
wrapErr := func(err error) error {
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
cbuf := &bytes.Buffer{}
if compressor != nil {
z, _ := compressor.Compress(cbuf)
if _, err := z.Write(in); err != nil {
return nil, wrapErr(err)
}
if err := z.Close(); err != nil {
return nil, wrapErr(err)
}
} else {
if err := cp.Do(cbuf, in); err != nil {
return nil, wrapErr(err)
}
}
return cbuf.Bytes(), nil
}
const (
payloadLen = 1
sizeLen = 4
headerLen = payloadLen + sizeLen
)
// msgHeader returns a 5-byte header for the message being transmitted and the
// payload, which is compData if non-nil or data otherwise.
func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
hdr = make([]byte, headerLen)
if compData != nil {
hdr[0] = byte(compressionMade)
data = compData
} else {
hdr[0] = byte(compressionNone)
}
// Write length of payload into buf
binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
return hdr, data
}
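// Worked example of the framing above: an uncompressed 1024-byte payload yields the 5-byte
// header {0x00, 0x00, 0x00, 0x04, 0x00}: one byte for the compression flag (compressionNone)
// followed by the length 1024 encoded big-endian in four bytes.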
func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
return &stats.OutPayload{
Client: client,
Payload: msg,
Data: data,
Length: len(data),
WireLength: len(payload) + headerLen,
SentTime: t,
}
}
func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
switch pf {
case compressionNone:
case compressionMade:
if recvCompress == "" || recvCompress == encoding.Identity |
if !haveCompressor {
return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
}
default:
return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
}
return nil
}
// Of the two compressor parameters, at most one should be set, but if both are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
pf, d, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
return err
}
if inPayload != nil {
inPayload.WireLength = len(d)
}
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
return st.Err()
}
if pf == compressionMade {
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
// use this decompressor as the default.
if dc != nil {
d, err = dc.Do(bytes.NewReader(d))
if err != nil {
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
} else {
dcReader, err := compressor.Decompress(bytes.NewReader(d))
if err != nil {
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
d, err = ioutil.ReadAll(dcReader)
if err != nil {
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
}
}
if len(d) > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
}
if err := c.Unmarshal(d, m); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
}
if inPayload != nil {
inPayload.RecvTime = time.Now()
inPayload.Payload = m
// TODO truncate large payload.
inPayload.Data = d
inPayload.Length = len(d)
}
return nil
}
type rpcInfo struct {
failfast bool
}
type rpcInfoContextKey struct{}
func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
}
func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
return
}
// Code returns the error code for err if it was produced by the rpc system.
// Otherwise, it returns codes.Unknown.
//
// Deprecated: use status.FromError and Code method instead.
func Code(err error) codes.Code {
if s, ok := status.FromError(err); ok {
return s.Code()
}
return codes.Unknown
}
// ErrorDesc returns the error description of err if it was produced by the rpc system.
// Otherwise, it returns err.Error() or empty string when err is nil.
//
// Deprecated: use status.FromError and Message method instead.
func ErrorDesc(err error) string {
if s, ok := status.FromError(err); ok {
return s.Message()
}
return err.Error()
}
// Errorf returns an error containing an error code and a description;
// Errorf returns nil if c is OK.
//
// Deprecated: use status.Errorf instead.
func Errorf(c codes.Code, format string, a ...interface{}) error {
return status.Errorf(c, format, a...)
}
// setCallInfoCodec should only be called after CallOptions have been applied.
func setCallInfoCodec(c *callInfo) error {
if c.codec != nil {
// codec was already set by a CallOption; use it.
return nil
}
if c.contentSubtype == "" {
// No codec specified in CallOptions; use proto by default.
c.codec = encoding.GetCodec(proto.Name)
return nil
}
// c.contentSubtype is already lowercased in CallContentSubtype
c.codec = encoding.GetCodec(c.contentSubtype)
if c.codec == nil {
return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
}
return nil
}
// parseDialTarget returns the network and address to pass to dialer
func parseDialTarget(target string) (net string, addr string) {
net = "tcp"
m1 := strings.Index(target, ":")
m2 := strings.Index(target, ":/")
// handle unix:addr which will fail with url.Parse
if m1 >= 0 && m2 < 0 {
if n := target[0:m1]; n == "unix" {
net = n
addr = target[m1+1:]
return net, addr
}
}
if m2 >= 0 {
t, err := url.Parse(target)
if err != nil {
return net, target
}
scheme := t.Scheme
addr = t.Path
if scheme == "unix" {
net = scheme
if addr == "" {
addr = t.Host
}
return net, addr
}
}
return net, target
}
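// Examples of the mapping performed by parseDialTarget (assuming standard url.Parse behavior):
//
//	"example.com:50051"     -> ("tcp", "example.com:50051")
//	"unix:/tmp/grpc.sock"   -> ("unix", "/tmp/grpc.sock")
//	"unix:///tmp/grpc.sock" -> ("unix", "/tmp/grpc.sock")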
// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
// These fields cannot be embedded in the original structs (e.g. ClientConn), since performing
// atomic operations on an int64 variable on a 32-bit machine requires the user to enforce 64-bit
// memory alignment.
// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
type channelzData struct {
callsStarted int64
callsFailed int64
callsSucceeded int64
	// lastCallStartedTime stores the timestamp at which the last call started. It is of int64 type
	// instead of time.Time since it is more costly to atomically update a time.Time variable than
	// an int64 variable.
lastCallStartedTime int64
}
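// A hypothetical update of one of these counters (field and owning struct names illustrative):
//
//	atomic.AddInt64(&cc.czData.callsStarted, 1)
//
// Keeping the int64 fields grouped in their own struct is what guarantees the 64-bit alignment
// that sync/atomic requires on 32-bit platforms.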
// The SupportPackageIsVersion variables are referenced from generated protocol
// buffer files to ensure compatibility with the gRPC version used. The latest
// support package version is 5.
//
// Older versions are kept for compatibility. They may be removed if
// compatibility cannot be maintained.
//
// These constants should not be referenced from any other code.
const (
SupportPackageIsVersion3 = true
SupportPackageIsVersion4 = true
SupportPackageIsVersion5 = true
)
const grpcUA = "grpc-go/" + Version
| {
return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
} |
index_object.py | class IndexObject:
hash: str
crc32: int
pack_end_offset: int
pack_start_offset: int
def | (self, hash: str, crc32: int, pack_start_offset: int):
self.hash = hash
self.crc32 = crc32
self.pack_start_offset = pack_start_offset | __init__ |
enable-s3-encryption.py | #!/usr/bin/env python3
import argparse
import boto3
from botocore.exceptions import ClientError
parser = argparse.ArgumentParser(description='Check all S3 buckets in the AWS account and enable default encryption with AES256')
parser.add_argument('aws_account_name', type=str, help='Named AWS user account') | args = parser.parse_args()
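# Example invocation (assumes a profile of this name exists in your AWS credentials/config):
#   python enable-s3-encryption.py my-profile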
session = boto3.session.Session(profile_name=args.aws_account_name)
s3 = session.client(service_name='s3')
enc_config = {
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
},
]
}
for bucket in s3.list_buckets()['Buckets']:
try:
enc_algorithm = s3.get_bucket_encryption(Bucket=bucket['Name'])['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm']
print('Bucket %s has default server-side encryption enabled with %s' % (bucket['Name'],enc_algorithm))
except ClientError as e:
if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
print('Bucket: %s does not have default server-side encryption enabled' % bucket['Name'])
try:
s3.put_bucket_encryption(Bucket=bucket['Name'],ServerSideEncryptionConfiguration=enc_config)
print('Enabled encryption on bucket: %s' % bucket['Name'])
except ClientError as e:
print(e.response['Error']['Code'])
else:
print(e.response['Error']['Code']) | |
admin.py | from django.conf import settings
from django.contrib import admin
from django.db import IntegrityError
from django.db.models import Exists, OuterRef, Q
from django.utils.translation import gettext as _
from import_export import resources
from import_export.admin import ImportExportMixin, ImportExportModelAdmin
from derbot.names.models import Color, DerbyName, DerbyNumber, Jersey, Toot
from derbot.names.tasks import generate_tank, pick_number, toot_name
logger = settings.LOGGER
class TootedFilter(admin.SimpleListFilter):
title = _("Tooted")
parameter_name = "tooted"
def lookups(self, request, model_admin):
return (
("yes", _("Yes")),
("no", _("No")),
)
def queryset(self, request, queryset):
if self.value() == "yes":
return queryset.filter(Q(Exists(Toot.objects.filter(name=OuterRef("pk")))))
if self.value() == "no":
return queryset.filter(~Q(Exists(Toot.objects.filter(name=OuterRef("pk")))))
class JerseyFilter(admin.SimpleListFilter):
title = _("has jersey")
parameter_name = "jersey"
def lookups(self, request, model_admin):
return (
("yes", _("Yes")),
("no", _("No")),
)
def queryset(self, request, queryset):
if self.value() == "yes":
return queryset.filter(~Q(jersey=None))
if self.value() == "no":
return queryset.filter(Q(jersey=None))
class NameResource(resources.ModelResource):
class Meta:
model = DerbyName
skip_unchanged = True
report_skipped = True
# use_bulk = True
# batch_size = 100
def save_instance(self, instance, using_transactions=True, dry_run=False):
try:
super(NameResource, self).save_instance(
instance, using_transactions, dry_run
)
        except IntegrityError:
            # Imported rows that duplicate an existing name violate the unique constraint; skip them.
            pass
class NumberResource(resources.ModelResource):
class Meta:
model = DerbyNumber
skip_unchanged = True
report_skipped = True
# use_bulk = True
# batch_size = 100
def save_instance(self, instance, using_transactions=True, dry_run=False):
try:
super(NumberResource, self).save_instance(
instance, using_transactions, dry_run
)
        except IntegrityError:
            # Imported rows that duplicate an existing number violate the unique constraint; skip them.
            pass
@admin.register(DerbyName)
class NameAdmin(ImportExportModelAdmin):
list_display = (
"id",
"name",
"number",
"cleared",
"registered",
"archived",
"created",
"updated",
"jersey",
)
list_filter = ["registered", "cleared", "archived", JerseyFilter, TootedFilter]
actions = [
"clear",
"unclear",
"archive",
"unarchive",
"new_numbers",
"make_tanks",
"toot",
]
resource_class = NameResource
@admin.action(description="Mark selected names as cleared for tooting")
def clear(self, request, queryset):
queryset.update(cleared=True)
self.message_user(request, f"Cleared {queryset.count()} names")
@admin.action(description="Mark selected names as NOT cleared for tooting")
def unclear(self, request, queryset):
queryset.update(cleared=False)
self.message_user(request, f"Uncleared {queryset.count()} names")
@admin.action(description="Archive selected names")
def archive(self, request, queryset):
queryset.update(archived=True)
self.message_user(request, f"Archived {queryset.count()} names")
@admin.action(description="Unrchive selected names")
def unarchive(self, request, queryset):
queryset.update(archived=False)
self.message_user(request, f"Unarchived {queryset.count()} names")
@admin.action(description="Choose (new) numbers for selected names")
def new_numbers(self, request, queryset):
for name in queryset:
print(name)
logger.info(f"Picking new number for {name}")
pick_number.delay(name.pk)
self.message_user(request, f"New numbers chosen for {queryset.count()} names")
@admin.action(description="Generate tanks for selected names")
def make_tanks(self, request, queryset):
for name in queryset:
print(name)
logger.info(f"Generating tank for {name}")
generate_tank.delay(name.pk, overwrite=True)
self.message_user(request, f"Tanks generated for {queryset.count()} names")
@admin.action(description="Toot selected names")
def toot(self, request, queryset):
logger.info(f"Tooting {queryset.count()} names")
for name in queryset:
logger.info(f"Tooting {name}")
toot_name.delay(name.pk, max_wait=0)
self.message_user(request, "Tooted selected names")
@admin.register(DerbyNumber)
class NumberAdmin(ImportExportModelAdmin):
list_display = ("id", "number", "created", "updated")
list_filter = ["created", "updated"]
resource_class = NumberResource
@admin.register(Jersey)
class | (ImportExportMixin, admin.ModelAdmin):
list_display = ("id", "derbyname", "fg_color", "bg_color", "image")
@admin.register(Toot)
class TootAdmin(ImportExportMixin, admin.ModelAdmin):
list_display = ("id", "name", "toot_id", "date")
@admin.register(Color)
class ColorAdmin(ImportExportMixin, admin.ModelAdmin):
list_display = ("id", "name", "hex", "pair_with")
| JerseyAdmin |
badgeHdFill.js | 'use strict';
Object.defineProperty(exports, '__esModule', { value: true });
var prefix = 'bi';
var iconName = 'badge-hd-fill';
var width = 512;
var height = 512;
var ligatures = [];
var unicode = null;
var svgPathData = 'M 64 64 A 64 64 0 0 0 0 128 L 0 384 A 64 64 0 0 0 64 448 L 448 448 A 64 64 0 0 0 512 384 L 512 128 A 64 64 0 0 0 448 64 L 64 64 z M 198.6875 160 L 236.67188 160 L 236.67188 160.03125 L 236.67188 352 L 198.6875 352 L 198.6875 269.75977 L 117.98438 269.75977 L 117.98438 352 L 80 352 L 80 160.03125 L 117.98438 160.03125 L 117.98438 238.11133 L 198.6875 238.11133 L 198.6875 160 z M 272 160.03125 L 342.01562 160.03125 C 400.38362 160.03125 427.93555 194.91153 427.93555 255.51953 C 427.93555 316.57553 400.00008 352 342.08008 352 L 272 352 L 272 160.03125 z M 309.98438 190.97656 L 309.98438 320.89648 L 336.96094 320.89648 C 372.70494 320.89648 388.86328 299.55186 388.86328 256.25586 C 388.86328 212.92786 372.54494 190.97656 336.96094 190.97656 L 309.98438 190.97656 z ';
exports.definition = {
prefix: prefix,
iconName: iconName,
icon: [
width,
height,
ligatures,
unicode,
svgPathData
]};
exports.biBadgeHdFill = exports.definition;
exports.prefix = prefix;
exports.iconName = iconName;
exports.width = width;
exports.height = height; | exports.ligatures = ligatures;
exports.unicode = unicode;
exports.svgPathData = svgPathData; | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def | ():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'WatchInSGE.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| main |
App.tsx | import { FC } from "react";
import styled from "styled-components";
import { Header } from "./components/Header";
const AppContainer = styled.div`
width: 100%;
height: 100%;
overflow: hidden;
`;
const App: FC = () => {
return (
<AppContainer>
<Header />
</AppContainer>
);
}; |
export default App; |
|
announcements.rs | //! Announcement effects.
//!
//! Announcements indicate new incoming data or events from various sources. See the top-level
//! module documentation for details.
use std::{
collections::HashMap,
fmt::{self, Display, Formatter},
};
use serde::Serialize;
use casper_types::{ExecutionResult, PublicKey};
use crate::{
components::{consensus::EraId, deploy_acceptor::Error, small_network::GossipedAddress},
effect::Responder,
types::{
Block, BlockHash, BlockHeader, Deploy, DeployHash, DeployHeader, FinalitySignature,
FinalizedBlock, Item, Timestamp,
},
utils::Source,
};
/// A networking layer announcement.
#[derive(Debug, Serialize)]
#[must_use]
pub enum NetworkAnnouncement<I, P> {
/// A payload message has been received from a peer.
MessageReceived {
/// The sender of the message
sender: I,
/// The message payload
payload: P,
},
/// Our public listening address should be gossiped across the network.
GossipOurAddress(GossipedAddress),
/// A new peer connection was established.
///
/// IMPORTANT NOTE: This announcement is a work-around for some short-term functionality. Do
    /// not rely on or use this for anything without first asking someone who has written
    /// this section of the code!
NewPeer(I),
}
impl<I, P> Display for NetworkAnnouncement<I, P>
where
I: Display,
P: Display,
{
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
match self {
NetworkAnnouncement::MessageReceived { sender, payload } => {
write!(formatter, "received from {}: {}", sender, payload)
}
NetworkAnnouncement::GossipOurAddress(_) => write!(formatter, "gossip our address"),
NetworkAnnouncement::NewPeer(id) => {
write!(formatter, "new peer connection established to {}", id)
}
}
}
}
/// An RPC API server announcement.
#[derive(Debug, Serialize)]
#[must_use]
pub enum RpcServerAnnouncement {
/// A new deploy received.
DeployReceived {
/// The received deploy.
deploy: Box<Deploy>,
/// A client responder in the case where a client submits a deploy.
responder: Option<Responder<Result<(), Error>>>,
},
}
impl Display for RpcServerAnnouncement {
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result |
}
/// A `DeployAcceptor` announcement.
#[derive(Debug, Serialize)]
pub enum DeployAcceptorAnnouncement<I> {
/// A deploy which wasn't previously stored on this node has been accepted and stored.
AcceptedNewDeploy {
/// The new deploy.
deploy: Box<Deploy>,
/// The source (peer or client) of the deploy.
source: Source<I>,
},
/// An invalid deploy was received.
InvalidDeploy {
/// The invalid deploy.
deploy: Box<Deploy>,
/// The source (peer or client) of the deploy.
source: Source<I>,
},
}
impl<I: Display> Display for DeployAcceptorAnnouncement<I> {
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
match self {
DeployAcceptorAnnouncement::AcceptedNewDeploy { deploy, source } => write!(
formatter,
"accepted new deploy {} from {}",
deploy.id(),
source
),
DeployAcceptorAnnouncement::InvalidDeploy { deploy, source } => {
write!(formatter, "invalid deploy {} from {}", deploy.id(), source)
}
}
}
}
/// A consensus announcement.
#[derive(Debug)]
pub enum ConsensusAnnouncement<I> {
/// A block was finalized.
Finalized(Box<FinalizedBlock>),
/// A linear chain block has been handled.
Handled(Box<BlockHeader>),
/// An equivocation has been detected.
Fault {
/// The Id of the era in which the equivocation was detected
era_id: EraId,
/// The public key of the equivocator.
public_key: Box<PublicKey>,
/// The timestamp when the evidence of the equivocation was detected.
timestamp: Timestamp,
},
/// We want to disconnect from a peer due to its transgressions.
DisconnectFromPeer(I),
}
impl<I> Display for ConsensusAnnouncement<I>
where
I: Display,
{
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
match self {
ConsensusAnnouncement::Finalized(block) => {
write!(formatter, "finalized proto block {}", block)
}
ConsensusAnnouncement::Handled(block_header) => write!(
formatter,
"Linear chain block has been handled by consensus, height={}, hash={}",
block_header.height(),
block_header.hash()
),
ConsensusAnnouncement::Fault {
era_id,
public_key,
timestamp,
} => write!(
formatter,
"Validator fault with public key: {} has been identified at time: {} in era: {}",
public_key, timestamp, era_id,
),
ConsensusAnnouncement::DisconnectFromPeer(peer) => {
write!(formatter, "Consensus wanting to disconnect from {}", peer)
}
}
}
}
/// A BlockExecutor announcement.
#[derive(Debug)]
pub enum BlockExecutorAnnouncement {
/// A new block from the linear chain was produced.
LinearChainBlock {
/// The block.
block: Block,
/// The results of executing the deploys in this block.
execution_results: HashMap<DeployHash, (DeployHeader, ExecutionResult)>,
},
}
impl Display for BlockExecutorAnnouncement {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
BlockExecutorAnnouncement::LinearChainBlock { block, .. } => {
write!(f, "created linear chain block {}", block.hash())
}
}
}
}
/// A Gossiper announcement.
#[derive(Debug)]
pub enum GossiperAnnouncement<T: Item> {
/// A new item has been received, where the item's ID is the complete item.
NewCompleteItem(T::Id),
}
impl<T: Item> Display for GossiperAnnouncement<T> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GossiperAnnouncement::NewCompleteItem(item) => write!(f, "new complete item {}", item),
}
}
}
/// A linear chain announcement.
#[derive(Debug)]
pub enum LinearChainAnnouncement {
/// A new block has been created and stored locally.
BlockAdded {
/// Block hash.
block_hash: BlockHash,
/// Block header.
block_header: Box<BlockHeader>,
},
/// New finality signature received.
NewFinalitySignature(Box<FinalitySignature>),
}
impl Display for LinearChainAnnouncement {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
LinearChainAnnouncement::BlockAdded { block_hash, .. } => {
write!(f, "block added {}", block_hash)
}
LinearChainAnnouncement::NewFinalitySignature(fs) => {
write!(f, "new finality signature {}", fs.block_hash)
}
}
}
}
| {
match self {
RpcServerAnnouncement::DeployReceived { deploy, .. } => {
write!(formatter, "api server received {}", deploy.id())
}
}
} |
actions.rs | use dominator::clone;
use crate::register::state::{Step, Step2Data};
use serde::Serialize;
use super::state::*;
use std::rc::Rc;
use shared::{
api::{ApiEndpoint, endpoints},
domain::{
meta::{AgeRangeId, AffiliationId, SubjectId},
user::CreateProfileRequest,
session::NewSessionResponse,
},
error::EmptyError,
};
use uuid::Uuid;
use wasm_bindgen::prelude::*;
use utils::{
storage,
prelude::*,
api_helpers::meta::MetaOptions
};
impl State {
pub fn pre_select(&self, meta:&MetaOptions) {
let affiliations = &mut *self.affiliations.borrow_mut();
let age_ranges = &mut *self.age_ranges.borrow_mut();
for (id, _) in meta.affiliations.iter() {
affiliations.insert(id.clone());
}
for (id, _) in meta.age_ranges.iter() {
age_ranges.insert(id.clone());
}
}
}
pub fn submit(state: Rc<State>) |
#[derive(Serialize, Debug)]
struct JsonRaw {
raw: String
}
| {
let age_ranges:Vec<AgeRangeId> = state.age_ranges
.borrow()
.iter()
.map(|id| {
AgeRangeId(Uuid::parse_str(id).unwrap_throw())
})
.collect();
let affiliations:Vec<AffiliationId> = state.affiliations
.borrow()
.iter()
.map(|id| {
AffiliationId(Uuid::parse_str(id).unwrap_throw())
})
.collect();
let subjects:Vec<SubjectId> = state.subjects
.borrow()
.iter()
.map(|id| {
SubjectId(Uuid::parse_str(id).unwrap_throw())
})
.collect();
let step_2 = state.step_2.clone();
let step_1 = step_2.step_1;
let req = CreateProfileRequest {
username: step_1.username,
over_18: true,
given_name: step_1.firstname,
family_name: step_1.lastname,
language: step_2.language,
locale: "en".to_string(),
timezone: chrono_tz::Tz::Asia__Jerusalem,
opt_into_edu_resources: step_2.marketing,
organization: Some(step_2.organization),
persona: Some(step_2.persona),
profile_image_url: step_1.oauth_profile.and_then(|p| p.profile_picture),
subjects,
age_ranges,
affiliations,
location: step_2.location_json.map(
|raw| serde_json::to_value(JsonRaw { raw }).unwrap_throw()
)
};
log::info!("{:?}", req);
state.register_loader.load(clone!(state => async move {
let (resp, status) = endpoints::user::CreateProfile::api_with_auth_status(Some(req)).await;
match resp {
Ok(resp) => {
storage::save_csrf_token(&resp.csrf);
let route:String = Route::User(UserRoute::RegisterComplete).into();
dominator::routing::go_to_url(&route);
},
Err(err) => {
let msg = match status {
401 => Some(crate::strings::STR_NOT_AUTHORIZED),
409 => Some(crate::strings::STR_USER_EXISTS),
422 => Some(crate::strings::STR_EMPTY_USERNAME),
_ => None
};
if let Some(msg) = msg {
                    let _ = web_sys::window().unwrap_throw().alert_with_message(msg);
} else {
log::error!("unexpected technical error!");
panic!("{:?}", err);
}
}
}
}));
} |
annotator.py |
from ..base import *
import capstone
import pyvex
class AngrColorSimprocedures(NodeAnnotator):
def __init__(self):
super(AngrColorSimprocedures, self).__init__()
def annotate_node(self, node):
if node.obj.is_simprocedure:
if node.obj.simprocedure_name in ['PathTerminator','ReturnUnconstrained','UnresolvableTarget']:
node.style = 'filled'
node.fillcolor = '#ffcccc'
else:
node.style = 'filled'
node.fillcolor = '#dddddd'
class AngrColorExit(NodeAnnotator):
def __init__(self):
super(AngrColorExit, self).__init__()
def annotate_node(self, node):
if not node.obj.is_simprocedure:
found = False
for e in self.graph.edges:
if e.src == node:
found = True
if 'jumpkind' in e.meta and e.meta['jumpkind'] == 'Ijk_Ret':
node.style = 'filled'
node.fillcolor = '#ddffdd'
if not found:
node.style = 'filled'
node.fillcolor = '#ddffdd'
class AngrColorEntry(NodeAnnotator):
def __init__(self):
super(AngrColorEntry, self).__init__()
def annotate_node(self, node):
if not node.obj.is_simprocedure:
if hasattr(node.obj, 'function_address') and node.obj.addr == node.obj.function_address:
node.style = 'filled'
node.fillcolor = '#ffffcc'
class AngrColorEdgesVex(EdgeAnnotator):
EDGECOLOR_CONDITIONAL_TRUE = 'green'
EDGECOLOR_CONDITIONAL_FALSE = 'red'
EDGECOLOR_UNCONDITIONAL = 'blue'
EDGECOLOR_CALL = 'black'
EDGECOLOR_RET = 'grey'
EDGECOLOR_UNKNOWN = 'yellow'
def __init__(self):
super(AngrColorEdgesVex, self).__init__()
def annotate_edge(self, edge):
vex = None
if 'jumpkind' in edge.meta:
jk = edge.meta['jumpkind']
if jk == 'Ijk_Ret':
edge.color = self.EDGECOLOR_RET
elif jk == 'Ijk_FakeRet':
edge.color = self.EDGECOLOR_RET
edge.style = 'dashed'
elif jk == 'Ijk_Call':
edge.color = self.EDGECOLOR_CALL
if 'vex' in edge.src.content:
vex = edge.src.content['vex']['vex']
if len (vex.next.constants) == 1 and vex.next.constants[0].value != edge.dst.obj.addr:
edge.style='dotted'
elif jk == 'Ijk_Boring':
if 'vex' in edge.src.content:
vex = edge.src.content['vex']['vex']
if len(vex.constant_jump_targets) > 1:
if len (vex.next.constants) == 1:
if edge.dst.obj.addr == vex.next.constants[0].value:
edge.color=self.EDGECOLOR_CONDITIONAL_FALSE
else:
edge.color=self.EDGECOLOR_CONDITIONAL_TRUE
else:
edge.color=self.EDGECOLOR_UNKNOWN
else:
edge.color=self.EDGECOLOR_UNCONDITIONAL
else:
edge.color=self.EDGECOLOR_UNCONDITIONAL
else:
#TODO warning
edge.color = self.EDGECOLOR_UNKNOWN
else:
edge.color = self.EDGECOLOR_UNKNOWN
class AngrPathAnnotator(EdgeAnnotator, NodeAnnotator):
def __init__(self, path):
super(AngrPathAnnotator, self).__init__()
self.path = path
self.trace = list(path.addr_trace)
def set_graph(self, graph):
super(AngrPathAnnotator, self).set_graph(graph)
self.vaddr = self.valid_addrs()
        ftrace = [a for a in self.trace if a in self.vaddr]  # materialize as a list so it can be sliced on Python 3
self.edges_hit = set(zip(ftrace[:-1], ftrace[1:]))
def valid_addrs(self):
vaddr = set()
for n in self.graph.nodes:
vaddr.add(n.obj.addr)
return vaddr
#TODO add caching
#TODO not sure if this is valid
def node_hit(self, node):
ck = list(node.callstack_key)
ck.append(node.addr)
rtrace = list(reversed(self.trace))
found = True
si = 0
for c in reversed(ck):
            if c is None:
break
try:
si = rtrace[si:].index(c)
            except ValueError:
found = False
break
return found
def annotate_edge(self, edge):
key = (edge.src.obj.addr, edge.dst.obj.addr)
if key in self.edges_hit and self.node_hit(edge.src.obj) and self.node_hit(edge.dst.obj):
edge.width = 3
edge.color = 'red'
def annotate_node(self, node):
if self.node_hit(node.obj):
node.width = 3
node.color = 'red'
class AngrBackwardSliceAnnotatorVex(ContentAnnotator):
def __init__(self, bs):
super(AngrBackwardSliceAnnotatorVex, self).__init__('vex')
self.bs = bs
self.targets = set(self.bs._targets)
def register(self, content):
content.add_column_before('taint')
def annotate_content(self, node, content):
if node.obj.is_simprocedure or node.obj.is_syscall:
return
st = self.bs.chosen_statements[node.obj.addr]
for k in range(len(content['data'])):
c = content['data'][k]
if k in st:
c['addr']['style'] = 'B'
c['statement']['style'] = 'B'
c['taint'] = {
'content':'[*]',
'style':'B'
}
if (node.obj, k) in self.targets:
c['addr']['color'] = 'red'
c['statement']['color'] = 'red'
class AngrBackwardSliceAnnotatorAsm(ContentAnnotator):
def __init__(self, bs):
super(AngrBackwardSliceAnnotatorAsm, self).__init__('asm')
self.bs = bs
self.targets = set(self.bs._targets)
def register(self, content):
content.add_column_before('taint')
def annotate_content(self, node, content):
if node.obj.is_simprocedure or node.obj.is_syscall:
return
st = self.bs.chosen_statements[node.obj.addr]
staddr = set()
#TODO
vex = self.bs.project.factory.block(addr=node.obj.addr, size=node.obj.size).vex
caddr = None
for j, s in enumerate(vex.statements):
if isinstance(s, pyvex.stmt.IMark):
caddr = s.addr
if j in st:
staddr.add(caddr)
for c in content['data']:
if c['_addr'] in staddr:
c['addr']['style'] = 'B'
c['mnemonic']['style'] = 'B'
c['operands']['style'] = 'B'
c['taint'] = {
'content':'[*]',
'style':'B'
}
class AngrColorDDGStmtEdges(EdgeAnnotator):
def __init__(self,project=None):
super(AngrColorDDGStmtEdges, self).__init__()
self.project = project
def annotate_edge(self, edge):
if 'type' in edge.meta:
if edge.meta['type'] == 'tmp':
edge.color = 'blue'
edge.label = 't'+ str(edge.meta['data'])
elif edge.meta['type'] == 'reg':
edge.color = 'green'
if self.project:
edge.label = self.project.arch.register_names[edge.meta['data'].reg] + " " + str(edge.meta['data'].size)
else:
edge.label = "reg"+str(edge.meta['data'].reg) + " " + str(edge.meta['data'].size)
elif edge.meta['type'] == 'mem':
edge.color = 'red'
edge.label = str(edge.meta['data'])
else:
edge.label = edge.meta['type']
edge.style = 'dotted'
class AngrColorDDGData(EdgeAnnotator, NodeAnnotator):
def __init__(self,project=None, labels=False):
super(AngrColorDDGData, self).__init__()
self.project = project
self.labels = labels
def annotate_edge(self, edge):
if 'type' in edge.meta:
if edge.meta['type'] == 'kill':
edge.color = 'red'
elif edge.meta['type'] == 'mem_addr':
edge.color = 'blue'
edge.style = 'dotted'
elif edge.meta['type'] == 'mem_data':
edge.color = 'blue'
else:
edge.color = 'yellow'
if self.labels:
|
def annotate_node(self, node):
if node.obj.initial:
node.fillcolor = '#ccffcc'
node.style = 'filled'
class AngrActionAnnotatorVex(ContentAnnotator):
def __init__(self):
super(AngrActionAnnotatorVex, self).__init__('vex')
def register(self, content):
content.add_column_after('action_type')
content.add_column_after('action_addr')
content.add_column_after('action_data')
def annotate_content(self, node, content):
from simuvex.s_action import SimActionData
if node.obj.is_simprocedure or node.obj.is_syscall:
return
if len(node.obj.final_states) > 0:
state = node.obj.final_states[0]
for action in state.log.actions:
if isinstance(action, SimActionData):
c = content['data'][action.stmt_idx]
c['action_type'] = {
'content': action.type+"/"+action.action+"("+str(action.size.ast)+")",
'align': 'LEFT'
}
#TODO
if str(action.addr) != 'None':
c['action_addr'] = {
'content': str(action.addr.ast),
'align': 'LEFT'
}
if str(action.data) != 'None':
c['action_data'] = {
'content': str(action.data.ast),
'align': 'LEFT'
}
#EXPERIMENTAL
class AngrCodelocLogAnnotator(ContentAnnotator):
def __init__(self, cllog):
super(AngrCodelocLogAnnotator, self).__init__('vex')
self.cllog = cllog
def register(self, content):
content.add_column_after('log')
def annotate_content(self, node, content):
if node.obj.is_simprocedure or node.obj.is_syscall:
return
for k in range(len(content['data'])):
c = content['data'][k]
key = (node.obj.addr, k)
if key in self.cllog:
c['log'] = {
'content': self.cllog[key],
'align':'LEFT'
}
class AngrCommentsAsm(ContentAnnotator):
def __init__(self, project):
super(AngrCommentsAsm, self).__init__('asm')
self.project = project
def register(self, content):
content.add_column_after('comment')
def annotate_content(self, node, content):
if node.obj.is_simprocedure or node.obj.is_syscall:
return
comments_by_addr = {}
if len(node.obj.final_states) > 0:
state = node.obj.final_states[0]
for action in state.log.actions:
label = ''
if action.type == 'mem' or action.type == 'reg':
if isinstance(action.data.ast, int) or action.data.ast.concrete:
d = state.se.any_int(action.data.ast)
if d in self.project.kb.labels:
label += 'data=' + self.project.kb.labels[d] + ' '
if isinstance(action.addr.ast, int) or action.addr.ast.concrete:
a = state.se.any_int(action.addr.ast)
if a in self.project.kb.labels:
label += 'addr=' + self.project.kb.labels[a] + ' '
if action.type == 'exit':
if action.target.ast.concrete:
a = state.se.any_int(action.target.ast)
if a in self.project.kb.labels:
label += self.project.kb.labels[a] + ' '
if label != '':
comments_by_addr[action.ins_addr] = label
for k in content['data']:
ins = k['_ins']
if ins.address in comments_by_addr:
if not ('comment' in k and 'content' in k['comment']):
k['comment'] = {
'content': "; " + comments_by_addr[ins.address][:100]
}
else:
k['comment']['content'] += ", " + comments_by_addr[ins.address][:100]
k['comment']['color'] = 'gray'
k['comment']['align'] = 'LEFT'
class AngrCommentsDataRef(ContentAnnotator):
def __init__(self, project):
super(AngrCommentsDataRef, self).__init__('asm')
self.project = project
def register(self, content):
content.add_column_after('comment')
def annotate_content(self, node, content):
if node.obj.is_simprocedure or node.obj.is_syscall:
return
comments_by_addr = {}
for dr in node.obj.accessed_data_references:
if dr.sort == 'string':
comments_by_addr[dr.insn_addr] = dr.content
for k in content['data']:
ins = k['_ins']
if ins.address in comments_by_addr:
if not ('comment' in k and 'content' in k['comment']):
k['comment'] = {
'content': "; " + comments_by_addr[ins.address][:100]
}
else:
k['comment']['content'] += ", " + comments_by_addr[ins.address][:100]
k['comment']['color'] = 'gray'
k['comment']['align'] = 'LEFT'
class AngrVariables(ContentAnnotator):
def __init__(self, project, debug=False):
super(AngrVariables, self).__init__('asm')
self.project = project
self.debug = debug
def register(self, content):
content.add_column_before('variables')
def annotate_content(self, node, content):
if node.obj.is_simprocedure or node.obj.is_syscall:
return
vm = self.project.kb.variables[node.obj.function_address]
for k in content['data']:
ins = k['_ins']
vars = vm.find_variables_by_insn(ins.address, 'memory')
if vars:
for var in vars:
if not 'variables' in k:
k['variables'] = {'content':''}
k['variables']['content'] += repr(var[0].name + (' (' + var[0].ident + ')' if self.debug else '') )
k['variables']['color'] = 'lightblue'
k['variables']['align'] = 'LEFT'
| edge.label = edge.meta['type'] |
slice.py | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable
from .. import QuerySetReader, BaseRecursiveDriver
if False:
from ...proto import jina_pb2
class SliceQL(QuerySetReader, BaseRecursiveDriver):
"""Restrict the size of the ``docs`` to ``k`` (given by the request)
Example::
- !ReduceAllDriver
with:
granularity_range: [0, 0]
adjacency_range: [0, 1]
- !SortQL
with:
reverse: true
field: 'score.value'
granularity_range: [0, 0]
adjacency_range: [0, 1]
- !SliceQL
with:
start: 0
end: 50
granularity_range: [0, 0]
adjacency_range: [0, 1]
`SliceQL` will ensure that only the first 50 documents are returned from this `Pod`
"""
def __init__(self, start: int, end: int = None, *args, **kwargs):
"""
:param start: Zero-based index at which to start extraction.
:param end: Zero-based index before which to end extraction.
        The slice extracts up to but not including `end`. For example, `SliceQL(start=1, end=4)`
        extracts the second element through the fourth element (elements indexed 1, 2, and 3).
"""
super().__init__(*args, **kwargs)
self._start = int(start)
        self._end = int(end) if end is not None else None
self.is_apply = False
def _apply_all(self, docs: Iterable['jina_pb2.Document'], *args, **kwargs):
| if self.start <= 0 and (self.end is None or self.end >= len(docs)):
pass
else:
del docs[int(self.end):]
del docs[:int(self.start)] |
|
distributor.py | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. | # You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
from twisted.internet import defer
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
logger = logging.getLogger(__name__)
def user_left_room(distributor, user, room_id):
distributor.fire("user_left_room", user=user, room_id=room_id)
class Distributor:
"""A central dispatch point for loosely-connected pieces of code to
register, observe, and fire signals.
Signals are named simply by strings.
TODO(paul): It would be nice to give signals stronger object identities,
so we can attach metadata, docstrings, detect typos, etc... But this
model will do for today.
"""
def __init__(self):
self.signals = {}
self.pre_registration = {}
def declare(self, name):
if name in self.signals:
raise KeyError("%r already has a signal named %s" % (self, name))
self.signals[name] = Signal(name)
if name in self.pre_registration:
signal = self.signals[name]
for observer in self.pre_registration[name]:
signal.observe(observer)
def observe(self, name, observer):
if name in self.signals:
self.signals[name].observe(observer)
else:
# TODO: Avoid strong ordering dependency by allowing people to
# pre-register observations on signals that don't exist yet.
if name not in self.pre_registration:
self.pre_registration[name] = []
self.pre_registration[name].append(observer)
def fire(self, name, *args, **kwargs):
"""Dispatches the given signal to the registered observers.
Runs the observers as a background process. Does not return a deferred.
"""
if name not in self.signals:
raise KeyError("%r does not have a signal named %s" % (self, name))
run_as_background_process(name, self.signals[name].fire, *args, **kwargs)
class Signal:
"""A Signal is a dispatch point that stores a list of callables as
observers of it.
Signals can be "fired", meaning that every callable observing it is
invoked. Firing a signal does not change its state; it can be fired again
at any later point. Firing a signal passes any arguments from the fire
method into all of the observers.
"""
def __init__(self, name):
self.name = name
self.observers = []
def observe(self, observer):
"""Adds a new callable to the observer list which will be invoked by
the 'fire' method.
Each observer callable may return a Deferred."""
self.observers.append(observer)
def fire(self, *args, **kwargs):
"""Invokes every callable in the observer list, passing in the args and
kwargs. Exceptions thrown by observers are logged but ignored. It is
not an error to fire a signal with no observers.
Returns a Deferred that will complete when all the observers have
completed."""
async def do(observer):
try:
result = observer(*args, **kwargs)
if inspect.isawaitable(result):
result = await result
return result
except Exception as e:
logger.warning(
"%s signal observer %s failed: %r", self.name, observer, e,
)
deferreds = [run_in_background(do, o) for o in self.observers]
return make_deferred_yieldable(
defer.gatherResults(deferreds, consumeErrors=True)
)
def __repr__(self):
return "<Signal name=%r>" % (self.name,) | |
antiflood_sql.py | try:
from userbot.modules.sql_helper import SESSION, BASE
except ImportError:
raise AttributeError
import threading
from sqlalchemy import Integer, Column, String, UnicodeText, func, distinct, Boolean
DEF_COUNT = 0
DEF_LIMIT = 0
DEF_OBJ = (None, DEF_COUNT, DEF_LIMIT)
class FloodControl(BASE):
__tablename__ = "antiflood"
chat_id = Column(String(14), primary_key=True)
user_id = Column(Integer)
count = Column(Integer, default=DEF_COUNT)
limit = Column(Integer, default=DEF_LIMIT)
def __init__(self, chat_id):
self.chat_id = str(chat_id) # ensure string
def __repr__(self):
return "<flood control for %s>" % self.chat_id
FloodControl.__table__.create(checkfirst=True)
INSERTION_LOCK = threading.RLock()
CHAT_FLOOD = {}
def set_flood(chat_id, amount):
with INSERTION_LOCK:
flood = SESSION.query(FloodControl).get(str(chat_id))
if not flood:
flood = FloodControl(str(chat_id))
flood.user_id = None
flood.limit = amount
CHAT_FLOOD[str(chat_id)] = (None, DEF_COUNT, amount)
SESSION.add(flood)
SESSION.commit()
def update_flood(chat_id: str, user_id) -> bool:
if str(chat_id) in CHAT_FLOOD:
curr_user_id, count, limit = CHAT_FLOOD.get(str(chat_id), DEF_OBJ)
if limit == 0: # no antiflood
return False
if user_id != curr_user_id or user_id is None: # other user
CHAT_FLOOD[str(chat_id)] = (user_id, DEF_COUNT + 1, limit)
return False
count += 1
if count > limit: # too many msgs, kick
|
# default -> update
CHAT_FLOOD[str(chat_id)] = (user_id, count, limit)
return False
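# Illustrative flow (a sketch; the chat and user ids are hypothetical):
#   set_flood(-100123, 5)           # allow at most 5 consecutive messages per user
#   update_flood("-100123", 42)     # -> False while the per-user count stays within the limit
#   ...                             # -> True ("kick") once a 6th consecutive message arrives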
def get_flood_limit(chat_id):
return CHAT_FLOOD.get(str(chat_id), DEF_OBJ)[2]
def migrate_chat(old_chat_id, new_chat_id):
with INSERTION_LOCK:
flood = SESSION.query(FloodControl).get(str(old_chat_id))
if flood:
CHAT_FLOOD[str(new_chat_id)] = CHAT_FLOOD.get(str(old_chat_id), DEF_OBJ)
flood.chat_id = str(new_chat_id)
SESSION.commit()
SESSION.close()
def __load_flood_settings():
global CHAT_FLOOD
try:
all_chats = SESSION.query(FloodControl).all()
CHAT_FLOOD = {chat.chat_id: (None, DEF_COUNT, chat.limit) for chat in all_chats}
finally:
SESSION.close()
return CHAT_FLOOD
| CHAT_FLOOD[str(chat_id)] = (None, DEF_COUNT, limit)
return True |
checkbox.rs | // Copyright 2019 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A checkbox widget.
use crate::kurbo::{BezPath, Point, RoundedRect, Size};
use crate::piet::{LineCap, LineJoin, LinearGradient, RenderContext, StrokeStyle, UnitPoint};
use crate::theme;
use crate::widget::Align;
use crate::{
BoxConstraints, Env, Event, EventCtx, LayoutCtx, LifeCycle, LifeCycleCtx, PaintCtx, UpdateCtx,
Widget,
};
/// A checkbox that toggles a boolean
#[derive(Debug, Clone, Default)]
pub struct Checkbox;
impl Checkbox {
pub fn new() -> impl Widget<bool> {
Align::vertical(UnitPoint::CENTER, Self::default())
}
}
impl Widget<bool> for Checkbox {
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut bool, _env: &Env) {
match event {
Event::MouseDown(_) => {
ctx.set_active(true);
ctx.request_paint();
} | ctx.set_active(false);
if ctx.is_hot() {
                        *data = !*data;
}
ctx.request_paint();
}
}
_ => (),
}
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, _data: &bool, _env: &Env) {
if let LifeCycle::HotChanged(_) = event {
ctx.request_paint();
}
}
fn update(&mut self, ctx: &mut UpdateCtx, _old_data: &bool, _data: &bool, _env: &Env) {
ctx.request_paint();
}
fn layout(
&mut self,
_layout_ctx: &mut LayoutCtx,
bc: &BoxConstraints,
_data: &bool,
env: &Env,
) -> Size {
bc.debug_check("Checkbox");
bc.constrain(Size::new(
env.get(theme::BASIC_WIDGET_HEIGHT),
env.get(theme::BASIC_WIDGET_HEIGHT),
))
}
fn paint(&mut self, paint_ctx: &mut PaintCtx, data: &bool, env: &Env) {
let size = env.get(theme::BASIC_WIDGET_HEIGHT);
let rect =
RoundedRect::from_origin_size(Point::ORIGIN, Size::new(size, size).to_vec2(), 2.);
//Paint the background
let background_gradient = LinearGradient::new(
UnitPoint::TOP,
UnitPoint::BOTTOM,
(
env.get(theme::BACKGROUND_LIGHT),
env.get(theme::BACKGROUND_DARK),
),
);
paint_ctx.fill(rect, &background_gradient);
let border_color = if paint_ctx.is_hot() {
env.get(theme::BORDER_LIGHT)
} else {
env.get(theme::BORDER_DARK)
};
paint_ctx.stroke(rect, &border_color, 1.);
if *data {
let mut path = BezPath::new();
path.move_to((4.0, 9.0));
path.line_to((8.0, 13.0));
path.line_to((14.0, 5.0));
let mut style = StrokeStyle::new();
style.set_line_cap(LineCap::Round);
style.set_line_join(LineJoin::Round);
paint_ctx.stroke_styled(path, &env.get(theme::LABEL_COLOR), 2., &style);
}
}
} | Event::MouseUp(_) => {
if ctx.is_active() { |
index.js | import { ABC, LETTER_WIDTH, LETTER_HEIGHT } from './serift';
const STRING_LENGTH = 8;
export default function convertText2Smiles(text = '', bgSymbol = '🤘', textSymbol = '💀', strLength = STRING_LENGTH) {
if (!text.length) return text;
function getLet | lineIndex) {
let letter;
const POSITION = lineIndex * LETTER_WIDTH;
switch (symb) {
case "'":
case '"':
letter = ABC.quote;
break;
default:
letter = ABC[symb];
}
if (!letter) {
letter = ABC[' '];
}
return letter.slice(POSITION, POSITION + (LETTER_WIDTH - 1));
}
function generateLine(str, result = '', secondLine = false) {
let currentResult = result;
const currentString = str.slice(0, strLength);
const surplusString = str.slice(strLength);
for (let lineIndex = secondLine ? 1 : 0; lineIndex < LETTER_HEIGHT; lineIndex += 1) {
for (let letterIndex = 0; letterIndex < currentString.length; letterIndex += 1) {
const letter = currentString[letterIndex];
currentResult += getLetter(letter, lineIndex);
}
currentResult += `${bgSymbol}\n`;
}
if (surplusString.length > 0) {
return generateLine(surplusString, currentResult, true);
}
return currentResult;
}
return generateLine(text.toLowerCase()).replace(/\./g, bgSymbol).replace(/\*/g, textSymbol);
}
| ter(symb, |
index.ts | describe('pipedoc', () => { | it('should work', async () => {
expect(true).toBe(true);
});
}); | |
mq.go | package data
import (
"context"
"fmt"
"github.com/golang/protobuf/proto"
pb "github.com/beneath-hq/beneath/infra/engine/proto"
) |
const (
writeRequestsTopic = "write-requests"
writeRequestsSubscription = "write-requests-worker"
writeReportsTopic = "write-reports"
writeReportsSubscription = "write-reports-reader"
)
// QueueWriteRequest queues a write request -- concretely, it results in
// the write request being written to Pubsub, then from there read by
// the data processing pipeline and written to BigTable and BigQuery
func (s *Service) QueueWriteRequest(ctx context.Context, req *pb.WriteRequest) error {
msg, err := proto.Marshal(req)
if err != nil {
return err
}
if len(msg) > s.MQ.MaxMessageSize() {
return fmt.Errorf("total write size <%d bytes> exceeds maximum <%d bytes>", len(msg), s.MQ.MaxMessageSize())
}
return s.MQ.Publish(ctx, writeRequestsTopic, msg, nil)
}
// ReadWriteRequests triggers fn for every WriteRecordsRequest that's written with QueueWriteRequest
func (s *Service) ReadWriteRequests(ctx context.Context, fn func(context.Context, *pb.WriteRequest) error) error {
return s.MQ.Subscribe(ctx, writeRequestsTopic, writeRequestsSubscription, true, false, func(ctx context.Context, msg []byte) error {
req := &pb.WriteRequest{}
err := proto.Unmarshal(msg, req)
if err != nil {
return err
}
return fn(ctx, req)
})
}
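// Illustrative pairing of the two calls above (a sketch; the WriteRequest payload
// and the handler body are assumptions, not taken from this codebase):
//
//	err := s.QueueWriteRequest(ctx, &pb.WriteRequest{ /* records to persist */ })
//	_ = s.ReadWriteRequests(ctx, func(ctx context.Context, req *pb.WriteRequest) error {
//		// process req, then acknowledge completion via QueueWriteReport
//		return nil
//	})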
// QueueWriteReport publishes a WriteReport (used to notify of completed processing of a WriteRequest)
func (s *Service) QueueWriteReport(ctx context.Context, rep *pb.WriteReport) error {
msg, err := proto.Marshal(rep)
if err != nil {
panic(err)
}
if len(msg) > s.MQ.MaxMessageSize() {
return fmt.Errorf("total write report size <%d bytes> exceeds maximum <%d bytes>", len(msg), s.MQ.MaxMessageSize())
}
return s.MQ.Publish(ctx, writeReportsTopic, msg, nil)
}
// ReadWriteReports reads messages published with QueueWriteReport
func (s *Service) ReadWriteReports(ctx context.Context, fn func(context.Context, *pb.WriteReport) error) error {
return s.MQ.Subscribe(ctx, writeReportsTopic, writeReportsSubscription, false, false, func(ctx context.Context, msg []byte) error {
rep := &pb.WriteReport{}
err := proto.Unmarshal(msg, rep)
if err != nil {
return err
}
return fn(ctx, rep)
})
} | |
Collapse.d.ts | import CollapsePanel from './CollapsePanel';
export interface CollapseProps {
activeKey?: Array<string> | string;
defaultActiveKey?: Array<string>;
/** 手风琴效果 */
accordion?: boolean;
onChange?: (key: string | string[]) => void;
style?: React.CSSProperties;
className?: string;
bordered?: boolean;
prefixCls?: string;
}
export default class Collapse extends React.Component<CollapseProps, any> {
static Panel: typeof CollapsePanel;
static defaultProps: {
prefixCls: string;
bordered: boolean;
openAnimation: {
appear(): void;
enter(node: HTMLElement, done: () => void): any;
leave(node: HTMLElement, done: () => void): any;
};
};
render(): JSX.Element;
} | /// <reference types="react" />
import * as React from 'react'; |
|
resolver.js | 'use strict';
const fs = require('fs');
const path = require('path');
const url = require('url');
const {promisify} = require('util');
const got = require('got');
const normalize = require('normalize-path');
const mime = require('mime');
const debug = require('debug')('asset-resolver');
const globby = require('globby');
const readFile = promisify(fs.readFile);
function isUrl(resource) {
return resource.startsWith('//') || resource.includes('://');
}
/**
 * Token generated by joining the username and password with a `:` character and base64-encoding the result.
* @param {String} user User identifier.
* @param {String} pass Password.
* @returns {String} Base64 encoded authentication token.
*/
const token = (user, pass) =>
Buffer.from([user, pass].join(':')).toString('base64');
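// For instance (illustrative values): token('user', 'pass') === 'dXNlcjpwYXNz',
// i.e. Buffer.from('user:pass').toString('base64'), ready for a Basic Authorization header.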
/**
* Get external resource
* @param {string} resource Ressource to be fetched
* @param {object} opts Option hash
* @returns {Promise} Promise
*/
function requestAsync(resource, options = {}) {
const settings = {
followRedirect: true,
// encoding: null,
https: {rejectUnauthorized: false},
retry: 0,
responseType: 'buffer'
};
if (options.user && options.pass) {
settings.headers = {
Authorization: `Basic ${token(options.user, options.pass)}`
};
}
return new Promise((resolve, reject) => {
// Handle protocol-relative urls
resource = url.resolve('http://te.st', resource); // eslint-disable-line node/no-deprecated-api
got(resource, settings)
.then((response) => {
if (response.statusCode !== 200) {
const message = `Wrong status code ${response.statusCode} for ${resource}`;
debug(message);
return reject(new Error(message));
}
const {headers = {}} = response;
const mimeType = headers['content-type'] || mime.getType(resource);
resolve({
contents: response.body,
path: resource,
mime: mimeType
});
})
.catch((error) => {
debug('Url failed:', error.message || error);
return reject(error);
});
});
}
/**
* Get local resource
* @param {string} resource Resource to be fetched
* @returns {Promise} Promise
*/
function readAsync(resource) {
return readFile(resource).then((body) => {
const mimeType = mime.getType(resource);
debug('Fetched:', resource);
return {
contents: body,
path: path.resolve(resource),
mime: mimeType
};
});
}
function join(base, file) {
if (isUrl(file)) { |
if (isUrl(base)) {
// eslint-disable-next-line node/no-deprecated-api
return url.resolve(base.endsWith('/') ? base : `${base}/`, file);
}
return path.join(base, file);
}
function glob(base) {
// eslint-disable-next-line unicorn/no-reduce
return base.reduce((result, value) => {
if (isUrl(value)) {
result.push(value);
return result;
}
if (fs.existsSync(value) && fs.lstatSync(value).isDirectory()) {
result.push(value);
return result;
}
let files = [];
try {
const pattern =
value.endsWith('/') || value.endsWith('\\')
? value.slice(0, Math.max(0, value.length - 1))
: value;
files = globby.sync([normalize(pattern)], {
nodir: false,
onlyDirectories: true,
cwd: process.cwd()
});
} catch (error) {
console.error(error.message);
}
return [...result, ...files];
}, []);
}
async function getResource(file, options) {
const {base, filter = () => true, cwd: _cwd = process.cwd(), glob = {}} =
options || {};
const cwd = glob.cwd || _cwd;
const searchBase = Array.isArray(base) ? base : [base].filter((r) => r);
const patterns = searchBase.map((base) => join(base, file));
const errors = [];
// try files first
const globPatterns = patterns
.filter((pattern) => !isUrl(pattern))
.map((dirPattern) =>
// fix for https://github.com/mrmlnc/fast-glob/issues/266
path.isAbsolute(dirPattern)
? normalize(path.relative(cwd, dirPattern))
: normalize(dirPattern)
);
const filepaths = (await globby(globPatterns, {cwd, ...glob})) || [];
if (filepaths) {
for (const filepath of filepaths) {
const resource = await readAsync(filepath);
if (await filter(resource)) {
return resource;
}
errors.push(new Error(`${filepath} rejected by filter`));
}
}
if (filepaths.length === 0 && globPatterns.length > 0) {
errors.push(new Error(`No such file or directory: ${globPatterns}`));
}
const urls = patterns.filter((resource) => isUrl(resource));
for (const url of urls) {
try {
const resource = await requestAsync(url, options);
if (await filter(resource)) {
return resource;
}
errors.push(new Error(`${url} rejected by filter`));
} catch (error) {
errors.push(error);
}
}
throw new Error(errors.map((error) => `${error.message}\n`));
}
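// Illustrative usage of getResource (a sketch; the file name and base locations are hypothetical):
//   const res = await getResource('logo.png', {base: ['assets', 'https://cdn.example.com/static']});
//   // -> {contents: <Buffer>, path: '/abs/path/assets/logo.png', mime: 'image/png'} on a local hit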
module.exports.getResource = getResource;
module.exports.glob = glob; | return file;
} |
raw.rs | use std::{ops::Index, sync::Arc};
use ra_arena::{impl_arena_id, map::ArenaMap, Arena, RawId};
use ra_syntax::{
ast::{self, AttrsOwner, NameOwner},
AstNode, AstPtr, SmolStr, SourceFile,
};
use test_utils::tested_by;
use crate::{
db::{AstDatabase, DefDatabase},
AsName, AstIdMap, Either, FileAstId, HirFileId, ModuleSource, Name, Path,
};
/// `RawItems` is a set of top-level items in a file (except for impls).
///
/// It is the input to the name resolution algorithm. `RawItems` are not invalidated
/// on most edits.
#[derive(Debug, Default, PartialEq, Eq)]
pub struct RawItems {
modules: Arena<Module, ModuleData>,
imports: Arena<ImportId, ImportData>,
defs: Arena<Def, DefData>,
macros: Arena<Macro, MacroData>,
/// items for top-level module
items: Vec<RawItem>,
}
#[derive(Debug, Default, PartialEq, Eq)]
pub struct ImportSourceMap {
map: ArenaMap<ImportId, ImportSourcePtr>,
}
type ImportSourcePtr = Either<AstPtr<ast::UseTree>, AstPtr<ast::ExternCrateItem>>;
type ImportSource = Either<ast::UseTree, ast::ExternCrateItem>;
impl ImportSourcePtr {
fn to_node(self, file: &SourceFile) -> ImportSource {
self.map(
|ptr| ptr.to_node(file.syntax()).to_owned(),
|ptr| ptr.to_node(file.syntax()).to_owned(),
)
}
}
impl ImportSourceMap {
fn insert(&mut self, import: ImportId, ptr: ImportSourcePtr) {
self.map.insert(import, ptr)
}
pub(crate) fn get(&self, source: &ModuleSource, import: ImportId) -> ImportSource {
let file = match source {
ModuleSource::SourceFile(file) => file.clone(),
ModuleSource::Module(m) => m.syntax().ancestors().find_map(SourceFile::cast).unwrap(),
};
self.map[import].to_node(&file)
}
}
impl RawItems {
pub(crate) fn raw_items_query(
db: &(impl DefDatabase + AstDatabase),
file_id: HirFileId,
) -> Arc<RawItems> {
db.raw_items_with_source_map(file_id).0
}
pub(crate) fn raw_items_with_source_map_query(
db: &(impl DefDatabase + AstDatabase),
file_id: HirFileId,
) -> (Arc<RawItems>, Arc<ImportSourceMap>) {
let mut collector = RawItemsCollector {
raw_items: RawItems::default(),
source_ast_id_map: db.ast_id_map(file_id),
source_map: ImportSourceMap::default(),
};
if let Some(node) = db.parse_or_expand(file_id) {
if let Some(source_file) = ast::SourceFile::cast(node) {
collector.process_module(None, source_file);
}
}
(Arc::new(collector.raw_items), Arc::new(collector.source_map))
}
pub(super) fn items(&self) -> &[RawItem] {
&self.items
}
}
impl Index<Module> for RawItems {
type Output = ModuleData;
fn index(&self, idx: Module) -> &ModuleData {
&self.modules[idx]
}
}
impl Index<ImportId> for RawItems {
type Output = ImportData;
fn index(&self, idx: ImportId) -> &ImportData {
&self.imports[idx]
}
}
impl Index<Def> for RawItems {
type Output = DefData;
fn index(&self, idx: Def) -> &DefData {
&self.defs[idx]
}
}
impl Index<Macro> for RawItems {
type Output = MacroData;
fn index(&self, idx: Macro) -> &MacroData {
&self.macros[idx]
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub(super) enum RawItem {
Module(Module),
Import(ImportId),
Def(Def),
Macro(Macro),
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(super) struct Module(RawId);
impl_arena_id!(Module);
#[derive(Debug, PartialEq, Eq)]
pub(super) enum ModuleData {
Declaration {
name: Name,
ast_id: FileAstId<ast::Module>,
attr_path: Option<SmolStr>,
},
Definition {
name: Name,
ast_id: FileAstId<ast::Module>,
items: Vec<RawItem>,
attr_path: Option<SmolStr>,
},
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct ImportId(RawId);
impl_arena_id!(ImportId);
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ImportData {
pub(super) path: Path,
pub(super) alias: Option<Name>,
pub(super) is_glob: bool,
pub(super) is_prelude: bool,
pub(super) is_extern_crate: bool,
pub(super) is_macro_use: bool,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(super) struct Def(RawId);
impl_arena_id!(Def);
#[derive(Debug, PartialEq, Eq)]
pub(super) struct DefData {
pub(super) name: Name,
pub(super) kind: DefKind,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub(super) enum DefKind {
Function(FileAstId<ast::FnDef>),
Struct(FileAstId<ast::StructDef>),
Union(FileAstId<ast::StructDef>),
Enum(FileAstId<ast::EnumDef>),
Const(FileAstId<ast::ConstDef>),
Static(FileAstId<ast::StaticDef>),
Trait(FileAstId<ast::TraitDef>),
TypeAlias(FileAstId<ast::TypeAliasDef>),
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(super) struct Macro(RawId);
impl_arena_id!(Macro);
#[derive(Debug, PartialEq, Eq)]
pub(super) struct MacroData {
pub(super) ast_id: FileAstId<ast::MacroCall>,
pub(super) path: Path,
pub(super) name: Option<Name>,
pub(super) export: bool,
}
struct RawItemsCollector {
raw_items: RawItems,
source_ast_id_map: Arc<AstIdMap>,
source_map: ImportSourceMap,
}
impl RawItemsCollector {
fn process_module(&mut self, current_module: Option<Module>, body: impl ast::ModuleItemOwner) {
for item_or_macro in body.items_with_macros() {
match item_or_macro {
ast::ItemOrMacro::Macro(m) => self.add_macro(current_module, m),
ast::ItemOrMacro::Item(item) => self.add_item(current_module, item),
}
}
}
fn add_item(&mut self, current_module: Option<Module>, item: ast::ModuleItem) {
let (kind, name) = match item {
ast::ModuleItem::Module(module) => {
self.add_module(current_module, module);
return;
}
ast::ModuleItem::UseItem(use_item) => {
self.add_use_item(current_module, use_item);
return;
}
ast::ModuleItem::ExternCrateItem(extern_crate) => {
self.add_extern_crate_item(current_module, extern_crate);
return;
}
ast::ModuleItem::ImplBlock(_) => {
// impls don't participate in name resolution
return;
}
ast::ModuleItem::StructDef(it) => {
let id = self.source_ast_id_map.ast_id(&it);
let name = it.name();
if it.is_union() {
(DefKind::Union(id), name)
} else {
(DefKind::Struct(id), name)
}
}
ast::ModuleItem::EnumDef(it) => {
(DefKind::Enum(self.source_ast_id_map.ast_id(&it)), it.name())
}
ast::ModuleItem::FnDef(it) => {
(DefKind::Function(self.source_ast_id_map.ast_id(&it)), it.name())
}
ast::ModuleItem::TraitDef(it) => {
(DefKind::Trait(self.source_ast_id_map.ast_id(&it)), it.name())
}
ast::ModuleItem::TypeAliasDef(it) => {
(DefKind::TypeAlias(self.source_ast_id_map.ast_id(&it)), it.name())
}
ast::ModuleItem::ConstDef(it) => {
(DefKind::Const(self.source_ast_id_map.ast_id(&it)), it.name())
}
ast::ModuleItem::StaticDef(it) => {
(DefKind::Static(self.source_ast_id_map.ast_id(&it)), it.name())
}
};
if let Some(name) = name {
let name = name.as_name();
let def = self.raw_items.defs.alloc(DefData { name, kind });
self.push_item(current_module, RawItem::Def(def))
}
}
fn add_module(&mut self, current_module: Option<Module>, module: ast::Module) {
let name = match module.name() {
Some(it) => it.as_name(),
None => return,
};
let ast_id = self.source_ast_id_map.ast_id(&module);
if module.has_semi() {
let attr_path = extract_mod_path_attribute(&module);
let item =
self.raw_items.modules.alloc(ModuleData::Declaration { name, ast_id, attr_path });
self.push_item(current_module, RawItem::Module(item));
return;
}
if let Some(item_list) = module.item_list() {
let attr_path = extract_mod_path_attribute(&module);
let item = self.raw_items.modules.alloc(ModuleData::Definition {
name,
ast_id,
items: Vec::new(),
attr_path,
});
self.process_module(Some(item), item_list);
self.push_item(current_module, RawItem::Module(item));
return;
}
tested_by!(name_res_works_for_broken_modules);
}
fn add_use_item(&mut self, current_module: Option<Module>, use_item: ast::UseItem) {
let is_prelude = use_item.has_atom_attr("prelude_import");
Path::expand_use_item(&use_item, |path, use_tree, is_glob, alias| {
let import_data = ImportData {
path,
alias,
is_glob,
is_prelude,
is_extern_crate: false,
is_macro_use: false,
};
self.push_import(current_module, import_data, Either::A(AstPtr::new(use_tree)));
})
}
fn add_extern_crate_item(
&mut self,
current_module: Option<Module>,
extern_crate: ast::ExternCrateItem,
) {
if let Some(name_ref) = extern_crate.name_ref() {
let path = Path::from_name_ref(&name_ref);
let alias = extern_crate.alias().and_then(|a| a.name()).map(|it| it.as_name());
let is_macro_use = extern_crate.has_atom_attr("macro_use");
let import_data = ImportData {
path,
alias,
is_glob: false,
is_prelude: false,
is_extern_crate: true,
is_macro_use,
};
self.push_import(current_module, import_data, Either::B(AstPtr::new(&extern_crate)));
}
}
fn add_macro(&mut self, current_module: Option<Module>, m: ast::MacroCall) {
let path = match m.path().and_then(Path::from_ast) {
Some(it) => it,
_ => return, | let name = m.name().map(|it| it.as_name());
let ast_id = self.source_ast_id_map.ast_id(&m);
let export = m.has_atom_attr("macro_export");
let m = self.raw_items.macros.alloc(MacroData { ast_id, path, name, export });
self.push_item(current_module, RawItem::Macro(m));
}
fn push_import(
&mut self,
current_module: Option<Module>,
data: ImportData,
source: ImportSourcePtr,
) {
let import = self.raw_items.imports.alloc(data);
self.source_map.insert(import, source);
self.push_item(current_module, RawItem::Import(import))
}
fn push_item(&mut self, current_module: Option<Module>, item: RawItem) {
match current_module {
Some(module) => match &mut self.raw_items.modules[module] {
ModuleData::Definition { items, .. } => items,
ModuleData::Declaration { .. } => unreachable!(),
},
None => &mut self.raw_items.items,
}
.push(item)
}
}
fn extract_mod_path_attribute(module: &ast::Module) -> Option<SmolStr> {
module.attrs().into_iter().find_map(|attr| {
attr.as_key_value().and_then(|(name, value)| {
let is_path = name == "path";
if is_path {
Some(value)
} else {
None
}
})
})
} | };
|
test_media.py | # Copyright 2020 Dirk Klimpel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from binascii import unhexlify
from parameterized import parameterized
import synapse.rest.admin
from synapse.api.errors import Codes
from synapse.rest.client.v1 import login, profile, room
from synapse.rest.media.v1.filepath import MediaFilePaths
from tests import unittest
from tests.server import FakeSite, make_request
class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
synapse.rest.admin.register_servlets_for_media_repo,
login.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.media_repo = hs.get_media_repository_resource()
self.server_name = hs.hostname
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.filepaths = MediaFilePaths(hs.config.media_store_path)
def test_no_auth(self):
"""
Try to delete media without authentication.
"""
url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, "12345")
channel = self.make_request("DELETE", url, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
self.other_user = self.register_user("user", "pass")
self.other_user_token = self.login("user", "pass")
url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, "12345")
channel = self.make_request(
"DELETE",
url,
access_token=self.other_user_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_media_does_not_exist(self):
"""
Tests that a lookup for a media that does not exist returns a 404
"""
url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, "12345")
channel = self.make_request(
"DELETE",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_media_is_not_local(self):
"""
        Tests that a lookup for media that is not local returns a 400
"""
url = "/_synapse/admin/v1/media/%s/%s" % ("unknown_domain", "12345")
channel = self.make_request(
"DELETE",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only delete local media", channel.json_body["error"])
def test_delete_media(self):
|
class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
synapse.rest.admin.register_servlets_for_media_repo,
login.register_servlets,
profile.register_servlets,
room.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.media_repo = hs.get_media_repository_resource()
self.server_name = hs.hostname
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.filepaths = MediaFilePaths(hs.config.media_store_path)
self.url = "/_synapse/admin/v1/media/%s/delete" % self.server_name
def test_no_auth(self):
"""
Try to delete media without authentication.
"""
channel = self.make_request("POST", self.url, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
self.other_user = self.register_user("user", "pass")
self.other_user_token = self.login("user", "pass")
channel = self.make_request(
"POST",
self.url,
access_token=self.other_user_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_media_is_not_local(self):
"""
Tests that a lookup for media that is not local returns a 400
"""
url = "/_synapse/admin/v1/media/%s/delete" % "unknown_domain"
channel = self.make_request(
"POST",
url + "?before_ts=1234",
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only delete local media", channel.json_body["error"])
def test_missing_parameter(self):
"""
If the parameter `before_ts` is missing, an error is returned.
"""
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
self.assertEqual(
"Missing integer query parameter b'before_ts'", channel.json_body["error"]
)
def test_invalid_parameter(self):
"""
If parameters are invalid, an error is returned.
"""
channel = self.make_request(
"POST",
self.url + "?before_ts=-1234",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
"Query parameter before_ts must be a string representing a positive integer.",
channel.json_body["error"],
)
channel = self.make_request(
"POST",
self.url + "?before_ts=1234&size_gt=-1234",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
"Query parameter size_gt must be a string representing a positive integer.",
channel.json_body["error"],
)
channel = self.make_request(
"POST",
self.url + "?before_ts=1234&keep_profiles=not_bool",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
self.assertEqual(
"Boolean query parameter b'keep_profiles' must be one of ['true', 'false']",
channel.json_body["error"],
)
def test_delete_media_never_accessed(self):
"""
        Tests that media is deleted if it is older than `before_ts` and was never
        accessed: `last_access_ts` is `NULL` and `created_ts` < `before_ts`
"""
# upload and do not access
server_and_media_id = self._create_media()
self.pump(1.0)
# test that the file exists
media_id = server_and_media_id.split("/")[1]
local_path = self.filepaths.local_media_filepath(media_id)
self.assertTrue(os.path.exists(local_path))
# timestamp after upload/create
now_ms = self.clock.time_msec()
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
media_id,
channel.json_body["deleted_media"][0],
)
self._access_media(server_and_media_id, False)
def test_keep_media_by_date(self):
"""
Tests that media is not deleted if it is newer than `before_ts`
"""
# timestamp before upload
now_ms = self.clock.time_msec()
server_and_media_id = self._create_media()
self._access_media(server_and_media_id)
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self._access_media(server_and_media_id)
# timestamp after upload
now_ms = self.clock.time_msec()
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
server_and_media_id.split("/")[1],
channel.json_body["deleted_media"][0],
)
self._access_media(server_and_media_id, False)
def test_keep_media_by_size(self):
"""
Tests that media is not deleted if its size is smaller than or equal
to `size_gt`
"""
server_and_media_id = self._create_media()
self._access_media(server_and_media_id)
now_ms = self.clock.time_msec()
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms) + "&size_gt=67",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self._access_media(server_and_media_id)
now_ms = self.clock.time_msec()
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms) + "&size_gt=66",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
server_and_media_id.split("/")[1],
channel.json_body["deleted_media"][0],
)
self._access_media(server_and_media_id, False)
def test_keep_media_by_user_avatar(self):
"""
Tests that we do not delete media if is used as a user avatar
Tests parameter `keep_profiles`
"""
server_and_media_id = self._create_media()
self._access_media(server_and_media_id)
# set media as avatar
channel = self.make_request(
"PUT",
"/profile/%s/avatar_url" % (self.admin_user,),
content=json.dumps({"avatar_url": "mxc://%s" % (server_and_media_id,)}),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
now_ms = self.clock.time_msec()
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=true",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self._access_media(server_and_media_id)
now_ms = self.clock.time_msec()
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=false",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
server_and_media_id.split("/")[1],
channel.json_body["deleted_media"][0],
)
self._access_media(server_and_media_id, False)
def test_keep_media_by_room_avatar(self):
"""
Tests that we do not delete media if it is used as a room avatar
Tests parameter `keep_profiles`
"""
server_and_media_id = self._create_media()
self._access_media(server_and_media_id)
# set media as room avatar
room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
channel = self.make_request(
"PUT",
"/rooms/%s/state/m.room.avatar" % (room_id,),
content=json.dumps({"url": "mxc://%s" % (server_and_media_id,)}),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
now_ms = self.clock.time_msec()
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=true",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self._access_media(server_and_media_id)
now_ms = self.clock.time_msec()
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=false",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
server_and_media_id.split("/")[1],
channel.json_body["deleted_media"][0],
)
self._access_media(server_and_media_id, False)
def _create_media(self):
"""
Create a media and return media_id and server_and_media_id
"""
upload_resource = self.media_repo.children[b"upload"]
# file size is 67 Byte
image_data = unhexlify(
b"89504e470d0a1a0a0000000d4948445200000001000000010806"
b"0000001f15c4890000000a49444154789c63000100000500010d"
b"0a2db40000000049454e44ae426082"
)
# Upload some media into the room
response = self.helper.upload_media(
upload_resource, image_data, tok=self.admin_user_tok, expect_code=200
)
# Extract media ID from the response
server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
server_name = server_and_media_id.split("/")[0]
# Check that new media is a local and not remote
self.assertEqual(server_name, self.server_name)
return server_and_media_id
def _access_media(self, server_and_media_id, expect_success=True):
"""
Try to access a media and check the result
"""
download_resource = self.media_repo.children[b"download"]
media_id = server_and_media_id.split("/")[1]
local_path = self.filepaths.local_media_filepath(media_id)
channel = make_request(
self.reactor,
FakeSite(download_resource),
"GET",
server_and_media_id,
shorthand=False,
access_token=self.admin_user_tok,
)
if expect_success:
self.assertEqual(
200,
channel.code,
msg=(
"Expected to receive a 200 on accessing media: %s"
% server_and_media_id
),
)
# Test that the file exists
self.assertTrue(os.path.exists(local_path))
else:
self.assertEqual(
404,
channel.code,
msg=(
"Expected to receive a 404 on accessing deleted media: %s"
% (server_and_media_id)
),
)
# Test that the file is deleted
self.assertFalse(os.path.exists(local_path))
class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
synapse.rest.admin.register_servlets_for_media_repo,
login.register_servlets,
]
def prepare(self, reactor, clock, hs):
media_repo = hs.get_media_repository_resource()
self.store = hs.get_datastore()
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
# Create media
upload_resource = media_repo.children[b"upload"]
# file size is 67 Byte
image_data = unhexlify(
b"89504e470d0a1a0a0000000d4948445200000001000000010806"
b"0000001f15c4890000000a49444154789c63000100000500010d"
b"0a2db40000000049454e44ae426082"
)
# Upload some media into the room
response = self.helper.upload_media(
upload_resource, image_data, tok=self.admin_user_tok, expect_code=200
)
# Extract media ID from the response
server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
self.media_id = server_and_media_id.split("/")[1]
self.url = "/_synapse/admin/v1/media/%s/%s"
@parameterized.expand(["protect", "unprotect"])
def test_no_auth(self, action: str):
"""
Try to protect media without authentication.
"""
channel = self.make_request("POST", self.url % (action, self.media_id), b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
@parameterized.expand(["protect", "unprotect"])
def test_requester_is_no_admin(self, action: str):
"""
If the user is not a server admin, an error is returned.
"""
self.other_user = self.register_user("user", "pass")
self.other_user_token = self.login("user", "pass")
channel = self.make_request(
"POST",
self.url % (action, self.media_id),
access_token=self.other_user_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_protect_media(self):
"""
        Tests that protecting and unprotecting media succeeds
"""
media_info = self.get_success(self.store.get_local_media(self.media_id))
self.assertFalse(media_info["safe_from_quarantine"])
# protect
channel = self.make_request(
"POST",
self.url % ("protect", self.media_id),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body)
media_info = self.get_success(self.store.get_local_media(self.media_id))
self.assertTrue(media_info["safe_from_quarantine"])
# unprotect
channel = self.make_request(
"POST",
self.url % ("unprotect", self.media_id),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body)
media_info = self.get_success(self.store.get_local_media(self.media_id))
self.assertFalse(media_info["safe_from_quarantine"])
| """
        Tests that deleting media succeeds
"""
download_resource = self.media_repo.children[b"download"]
upload_resource = self.media_repo.children[b"upload"]
image_data = unhexlify(
b"89504e470d0a1a0a0000000d4948445200000001000000010806"
b"0000001f15c4890000000a49444154789c63000100000500010d"
b"0a2db40000000049454e44ae426082"
)
# Upload some media into the room
response = self.helper.upload_media(
upload_resource, image_data, tok=self.admin_user_tok, expect_code=200
)
# Extract media ID from the response
server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
server_name, media_id = server_and_media_id.split("/")
self.assertEqual(server_name, self.server_name)
# Attempt to access media
channel = make_request(
self.reactor,
FakeSite(download_resource),
"GET",
server_and_media_id,
shorthand=False,
access_token=self.admin_user_tok,
)
# Should be successful
self.assertEqual(
200,
channel.code,
msg=(
"Expected to receive a 200 on accessing media: %s" % server_and_media_id
),
)
# Test if the file exists
local_path = self.filepaths.local_media_filepath(media_id)
self.assertTrue(os.path.exists(local_path))
url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, media_id)
# Delete media
channel = self.make_request(
"DELETE",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
media_id,
channel.json_body["deleted_media"][0],
)
# Attempt to access media
channel = make_request(
self.reactor,
FakeSite(download_resource),
"GET",
server_and_media_id,
shorthand=False,
access_token=self.admin_user_tok,
)
self.assertEqual(
404,
channel.code,
msg=(
"Expected to receive a 404 on accessing deleted media: %s"
% server_and_media_id
),
)
# Test if the file is deleted
self.assertFalse(os.path.exists(local_path)) |
download_pretrained_model.py | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zipfile
from time import time
import requests
from batchgenerators.utilities.file_and_folder_operations import join, isfile
from nnunet.paths import network_training_output_dir
def get_available_models():
available_models = {
"Task001_BrainTumour": {
'description': "Brain Tumor Segmentation. \n"
"Segmentation targets are edema, enhancing tumor and necrosis, \n"
"input modalities are 0: FLAIR, 1: T1, 2: T1 with contrast agent, 3: T2. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task001_BrainTumour.zip?download=1"
},
"Task002_Heart": {
'description': "Left Atrium Segmentation. \n"
"Segmentation target is the left atrium, \n"
"input modalities are 0: MRI. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task002_Heart.zip?download=1"
},
"Task003_Liver": {
'description': "Liver and Liver Tumor Segmentation. \n"
"Segmentation targets are liver and tumors, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task003_Liver.zip?download=1"
},
"Task004_Hippocampus": {
'description': "Hippocampus Segmentation. \n"
"Segmentation targets posterior and anterior parts of the hippocampus, \n"
"input modalities are 0: MRI. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task004_Hippocampus.zip?download=1"
},
"Task005_Prostate": {
'description': "Prostate Segmentation. \n"
"Segmentation targets are peripheral and central zone, \n"
"input modalities are 0: T2, 1: ADC. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4485926/files/Task005_Prostate.zip?download=1"
},
"Task006_Lung": {
'description': "Lung Nodule Segmentation. \n"
"Segmentation target are lung nodules, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task006_Lung.zip?download=1"
},
"Task007_Pancreas": {
'description': "Pancreas Segmentation. \n"
"Segmentation targets are pancras and pancreas tumor, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task007_Pancreas.zip?download=1"
},
"Task008_HepaticVessel": {
'description': "Hepatic Vessel Segmentation. \n"
"Segmentation targets are hepatic vesels and liver tumors, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task008_HepaticVessel.zip?download=1"
},
"Task009_Spleen": {
'description': "Spleen Segmentation. \n"
"Segmentation target is the spleen, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task009_Spleen.zip?download=1"
},
"Task010_Colon": {
'description': "Colon Cancer Segmentation. \n"
"Segmentation target are colon caner primaries, \n"
"input modalities are 0: CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task010_Colon.zip?download=1"
},
"Task017_AbdominalOrganSegmentation": {
'description': "Multi-Atlas Labeling Beyond the Cranial Vault - Abdomen. \n"
"Segmentation targets are thirteen different abdominal organs, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see https://www.synapse.org/#!Synapse:syn3193805/wiki/217754",
'url': "https://zenodo.org/record/4003545/files/Task017_AbdominalOrganSegmentation.zip?download=1"
},
"Task024_Promise": {
'description': "Prostate MR Image Segmentation 2012. \n"
"Segmentation target is the prostate, \n"
"input modalities are 0: T2. \n"
"Also see https://promise12.grand-challenge.org/",
'url': "https://zenodo.org/record/4003545/files/Task024_Promise.zip?download=1"
},
"Task027_ACDC": {
'description': "Automatic Cardiac Diagnosis Challenge. \n"
"Segmentation targets are right ventricle, left ventricular cavity and left myocardium, \n"
"input modalities are 0: cine MRI. \n"
"Also see https://acdc.creatis.insa-lyon.fr/",
'url': "https://zenodo.org/record/4003545/files/Task027_ACDC.zip?download=1"
},
"Task029_LiTS": {
'description': "Liver and Liver Tumor Segmentation Challenge. \n"
"Segmentation targets are liver and liver tumors, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see https://competitions.codalab.org/competitions/17094",
'url': "https://zenodo.org/record/4003545/files/Task029_LITS.zip?download=1"
},
"Task035_ISBILesionSegmentation": {
'description': "Longitudinal multiple sclerosis lesion segmentation Challenge. \n"
"Segmentation target is MS lesions, \n"
"input modalities are 0: FLAIR, 1: MPRAGE, 2: proton density, 3: T2. \n"
"Also see https://smart-stats-tools.org/lesion-challenge",
'url': "https://zenodo.org/record/4003545/files/Task035_ISBILesionSegmentation.zip?download=1"
},
"Task038_CHAOS_Task_3_5_Variant2": {
'description': "CHAOS - Combined (CT-MR) Healthy Abdominal Organ Segmentation Challenge (Task 3 & 5). \n"
"Segmentation targets are left and right kidney, liver, spleen, \n"
"input modalities are 0: T1 in-phase, T1 out-phase, T2 (can be any of those)\n"
"Also see https://chaos.grand-challenge.org/",
'url': "https://zenodo.org/record/4003545/files/Task038_CHAOS_Task_3_5_Variant2.zip?download=1"
},
"Task048_KiTS_clean": {
'description': "Kidney and Kidney Tumor Segmentation Challenge. "
"Segmentation targets kidney and kidney tumors, "
"input modalities are 0: abdominal CT scan. "
"Also see https://kits19.grand-challenge.org/",
'url': "https://zenodo.org/record/4003545/files/Task048_KiTS_clean.zip?download=1"
},
"Task055_SegTHOR": {
'description': "SegTHOR: Segmentation of THoracic Organs at Risk in CT images. \n"
"Segmentation targets are aorta, esophagus, heart and trachea, \n"
"input modalities are 0: CT scan. \n"
"Also see https://competitions.codalab.org/competitions/21145",
'url': "https://zenodo.org/record/4003545/files/Task055_SegTHOR.zip?download=1"
},
"Task061_CREMI": {
'description': "MICCAI Challenge on Circuit Reconstruction from Electron Microscopy Images (Synaptic Cleft segmentation task). \n"
"Segmentation target is synaptic clefts, \n"
"input modalities are 0: serial section transmission electron microscopy of neural tissue. \n"
"Also see https://cremi.org/",
'url': "https://zenodo.org/record/4003545/files/Task061_CREMI.zip?download=1"
},
"Task075_Fluo_C3DH_A549_ManAndSim": {
'description': "Fluo-C3DH-A549-SIM and Fluo-C3DH-A549 datasets of the cell tracking challenge. Segmentation target are C3DH cells in fluorescence microscopy images.\n"
"input modalities are 0: fluorescence_microscopy\n"
"Also see http://celltrackingchallenge.net/",
'url': "https://zenodo.org/record/4003545/files/Task075_Fluo_C3DH_A549_ManAndSim.zip?download=1"
},
"Task076_Fluo_N3DH_SIM": {
'description': "Fluo-N3DH-SIM dataset of the cell tracking challenge. Segmentation target are N3DH cells and cell borders in fluorescence microscopy images.\n"
"input modalities are 0: fluorescence_microscopy\n"
"Also see http://celltrackingchallenge.net/\n",
"Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
"See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task076_Fluo_N3DH_SIM.py"
'url': "https://zenodo.org/record/4003545/files/Task076_Fluo_N3DH_SIM.zip?download=1"
},
"Task089_Fluo-N2DH-SIM_thickborder_time": {
'description': "Fluo-N2DH-SIM dataset of the cell tracking challenge. Segmentation target are nuclei of N2DH cells and cell borders in fluorescence microscopy images.\n"
"input modalities are 0: t minus 4, 0: t minus 3, 0: t minus 2, 0: t minus 1, 0: frame of interest\n"
"Note that the input channels are different time steps from a time series acquisition\n"
"Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
"See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py"
"Also see http://celltrackingchallenge.net/",
'url': "https://zenodo.org/record/4003545/files/Task089_Fluo-N2DH-SIM_thickborder_time.zip?download=1"
},
"Task114_heart_MNMs": {
'description': "Cardiac MRI short axis images from the M&Ms challenge 2020.\n"
"input modalities are 0: MRI \n"
"See also https://www.ub.edu/mnms/ \n"
"Note: Labels of the M&Ms Challenge are not in the same order as for the ACDC challenge. \n"
"See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task114_heart_mnms.py",
'url': "https://zenodo.org/record/4288464/files/Task114_heart_MNMs.zip?download=1"
},
}
return available_models
def print_available_pretrained_models():
print('The following pretrained models are available:\n')
av_models = get_available_models()
for m in av_models.keys():
print('')
print(m)
print(av_models[m]['description'])
def download_and_install_pretrained_model_by_name(taskname):
av_models = get_available_models()
if taskname not in av_models.keys():
raise RuntimeError("\nThe requested pretrained model ('%s') is not available." % taskname)
if len(av_models[taskname]['url']) == 0:
raise RuntimeError("The requested model has not been uploaded yet. Please check back in a few days")
download_and_install_from_url(av_models[taskname]['url'])
def download_and_install_from_url(url):
assert network_training_output_dir is not None, "Cannot install model because network_training_output_dir is not " \
"set (RESULTS_FOLDER missing as environment variable, see " \
"Installation instructions)"
import http.client
http.client.HTTPConnection._http_vsn = 10
http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
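    # Forcing HTTP/1.0 disables persistent/chunked HTTP/1.1 connections for this
    # process; presumably a workaround for large downloads stalling or being cut short.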
import os
home = os.path.expanduser('~')
random_number = int(time() * 1e7)
tempfile = join(home, '.nnunetdownload_%s' % str(random_number))
try:
with open(tempfile, 'wb') as f:
with requests.get(url, stream=True) as r:
r.raise_for_status()
for chunk in r.iter_content(chunk_size=8192 * 16):
# If you have chunk encoded response uncomment if
# and set chunk_size parameter to None.
# if chunk:
f.write(chunk)
print("Download finished. Extracting...")
install_model_from_zip_file(tempfile)
print("Done")
except Exception as e:
raise e
finally:
if isfile(tempfile):
os.remove(tempfile)
def | (url, local_filename):
# borrowed from https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests
# NOTE the stream=True parameter below
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=None):
# If you have chunk encoded response uncomment if
# and set chunk_size parameter to None.
#if chunk:
f.write(chunk)
return local_filename
def install_model_from_zip_file(zip_file: str):
with zipfile.ZipFile(zip_file, 'r') as zip_ref:
zip_ref.extractall(network_training_output_dir)
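# Usage sketch (illustrative only, not part of the original module): assuming
# RESULTS_FOLDER / network_training_output_dir is configured, the helpers above
# can be combined as follows; "Task004_Hippocampus" is simply one of the task
# names returned by get_available_models().
#
#   print_available_pretrained_models()
#   print_license_warning()
#   download_and_install_pretrained_model_by_name("Task004_Hippocampus")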
def print_license_warning():
print('')
print('######################################################')
print('!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!')
print('######################################################')
print("Using the pretrained model weights is subject to the license of the dataset they were trained on. Some "
"allow commercial use, others don't. It is your responsibility to make sure you use them appropriately! Use "
"nnUNet_print_pretrained_model_info(task_name) to see a summary of the dataset and where to find its license!")
print('######################################################')
print('')
def download_by_name():
import argparse
parser = argparse.ArgumentParser(description="Use this to download pretrained models. CAREFUL: This script will "
"overwrite "
"existing models (if they share the same trainer class and plans as "
"the pretrained model")
parser.add_argument("task_name", type=str, help='Task name of the pretrained model. To see '
'available task names, run nnUNet_print_available_'
'pretrained_models')
args = parser.parse_args()
taskname = args.task_name
print_license_warning()
download_and_install_pretrained_model_by_name(taskname)
def download_by_url():
import argparse
parser = argparse.ArgumentParser(
description="Use this to download pretrained models. This script is intended to download models via url only. "
"If you want to download one of our pretrained models, please use nnUNet_download_pretrained_model. "
"CAREFUL: This script will overwrite "
"existing models (if they share the same trainer class and plans as "
"the pretrained model.")
parser.add_argument("url", type=str, help='URL of the pretrained model')
args = parser.parse_args()
url = args.url
download_and_install_from_url(url)
def install_from_zip_entry_point():
import argparse
parser = argparse.ArgumentParser(
description="Use this to install a zip file containing a pretrained model.")
parser.add_argument("zip", type=str, help='zip file')
args = parser.parse_args()
zip = args.zip
install_model_from_zip_file(zip)
def print_pretrained_model_requirements():
import argparse
parser = argparse.ArgumentParser(description="Use this to see the properties of a pretrained model, especially "
"what input modalities it requires")
parser.add_argument("task_name", type=str, help='Task name of the pretrained model. To see '
'available task names, run nnUNet_print_available_'
'pretrained_models')
args = parser.parse_args()
taskname = args.task_name
av = get_available_models()
if taskname not in av.keys():
raise RuntimeError("Invalid task name. This pretrained model does not exist. To see available task names, "
"run nnUNet_print_available_pretrained_models")
print(av[taskname]['description'])
if __name__ == '__main__':
url = 'https://www.dropbox.com/s/ft54q1gi060vm2x/Task004_Hippocampus.zip?dl=1' | download_file |
basic_scheduler.rs | use crate::future::poll_fn;
use crate::loom::sync::atomic::AtomicBool;
use crate::loom::sync::Mutex;
use crate::park::{Park, Unpark};
use crate::runtime::context::EnterGuard;
use crate::runtime::driver::Driver;
use crate::runtime::stats::{RuntimeStats, WorkerStatsBatcher};
use crate::runtime::task::{self, JoinHandle, OwnedTasks, Schedule, Task};
use crate::runtime::Callback;
use crate::sync::notify::Notify;
use crate::util::atomic_cell::AtomicCell;
use crate::util::{waker_ref, Wake, WakerRef};
use std::cell::RefCell;
use std::collections::VecDeque;
use std::fmt;
use std::future::Future;
use std::sync::atomic::Ordering::{AcqRel, Release};
use std::sync::Arc;
use std::task::Poll::{Pending, Ready};
use std::time::Duration;
/// Executes tasks on the current thread
pub(crate) struct BasicScheduler {
/// Core scheduler data is acquired by a thread entering `block_on`.
core: AtomicCell<Core>,
/// Notifier for waking up other threads to steal the
/// driver.
notify: Notify,
/// Sendable task spawner
spawner: Spawner,
/// This is usually None, but right before dropping the BasicScheduler, it
/// is changed to `Some` with the context being the runtime's own context.
/// This ensures that any tasks dropped in the `BasicScheduler`s destructor
/// run in that runtime's context.
context_guard: Option<EnterGuard>,
}
/// Data required for executing the scheduler. The struct is passed around to
/// a function that will perform the scheduling work and acts as a capability token.
struct Core {
/// Scheduler run queue
tasks: VecDeque<task::Notified<Arc<Shared>>>,
/// Sendable task spawner
spawner: Spawner,
/// Current tick
tick: u8,
/// Runtime driver
///
/// The driver is removed before starting to park the thread
driver: Option<Driver>,
/// Stats batcher
stats: WorkerStatsBatcher,
}
#[derive(Clone)]
pub(crate) struct Spawner {
shared: Arc<Shared>,
}
/// A remote scheduler entry.
///
/// These are filled in by remote threads sending instructions to the scheduler.
enum RemoteMsg {
/// A remote thread wants to spawn a task.
Schedule(task::Notified<Arc<Shared>>),
}
// Safety: Used correctly, the task header is "thread safe". Ultimately the task
// is owned by the current thread executor, for which this instruction is being
// sent.
unsafe impl Send for RemoteMsg {}
/// Scheduler state shared between threads.
struct Shared {
/// Remote run queue. None if the `Runtime` has been dropped.
queue: Mutex<Option<VecDeque<RemoteMsg>>>,
/// Collection of all active tasks spawned onto this executor.
owned: OwnedTasks<Arc<Shared>>,
/// Unpark the blocked thread.
unpark: <Driver as Park>::Unpark,
/// Indicates whether the blocked on thread was woken.
woken: AtomicBool,
/// Callback for a worker parking itself
before_park: Option<Callback>,
/// Callback for a worker unparking itself
after_unpark: Option<Callback>,
/// Keeps track of various runtime stats.
stats: RuntimeStats,
}
/// Thread-local context.
struct Context {
/// Handle to the spawner
spawner: Spawner,
/// Scheduler core, enabling the holder of `Context` to execute the
/// scheduler.
core: RefCell<Option<Box<Core>>>,
}
/// Initial queue capacity.
const INITIAL_CAPACITY: usize = 64;
/// Max number of tasks to poll per tick.
#[cfg(loom)]
const MAX_TASKS_PER_TICK: usize = 4;
#[cfg(not(loom))]
const MAX_TASKS_PER_TICK: usize = 61;
/// How often to check the remote queue first.
const REMOTE_FIRST_INTERVAL: u8 = 31;
// Tracks the current BasicScheduler.
scoped_thread_local!(static CURRENT: Context);
impl BasicScheduler {
pub(crate) fn new(
driver: Driver,
before_park: Option<Callback>,
after_unpark: Option<Callback>,
) -> BasicScheduler {
let unpark = driver.unpark();
let spawner = Spawner {
shared: Arc::new(Shared {
queue: Mutex::new(Some(VecDeque::with_capacity(INITIAL_CAPACITY))),
owned: OwnedTasks::new(),
unpark,
woken: AtomicBool::new(false),
before_park,
after_unpark,
stats: RuntimeStats::new(1),
}),
};
let core = AtomicCell::new(Some(Box::new(Core {
tasks: VecDeque::with_capacity(INITIAL_CAPACITY),
spawner: spawner.clone(),
tick: 0,
driver: Some(driver),
stats: WorkerStatsBatcher::new(0),
})));
BasicScheduler {
core,
notify: Notify::new(),
spawner,
context_guard: None,
}
}
pub(crate) fn spawner(&self) -> &Spawner {
&self.spawner
}
pub(crate) fn block_on<F: Future>(&self, future: F) -> F::Output {
pin!(future);
// Attempt to steal the scheduler core and block_on the future if we can
// there, otherwise, lets select on a notification that the core is
// available or the future is complete.
loop {
if let Some(core) = self.take_core() {
return core.block_on(future);
} else {
let mut enter = crate::runtime::enter(false);
let notified = self.notify.notified();
pin!(notified);
if let Some(out) = enter
.block_on(poll_fn(|cx| {
if notified.as_mut().poll(cx).is_ready() {
return Ready(None);
}
if let Ready(out) = future.as_mut().poll(cx) {
return Ready(Some(out));
}
Pending
}))
.expect("Failed to `Enter::block_on`")
{
return out;
}
}
}
}
fn take_core(&self) -> Option<CoreGuard<'_>> {
let core = self.core.take()?;
Some(CoreGuard {
context: Context {
spawner: self.spawner.clone(),
core: RefCell::new(Some(core)),
},
basic_scheduler: self,
})
}
pub(super) fn set_context_guard(&mut self, guard: EnterGuard) {
self.context_guard = Some(guard);
}
}
impl Context {
/// Execute the closure with the given scheduler core stored in the
/// thread-local context.
fn run_task<R>(&self, mut core: Box<Core>, f: impl FnOnce() -> R) -> (Box<Core>, R) {
core.stats.incr_poll_count();
self.enter(core, || crate::coop::budget(f))
}
/// Blocks the current thread until an event is received by the driver,
/// including I/O events, timer events, ...
fn park(&self, mut core: Box<Core>) -> Box<Core> {
let mut driver = core.driver.take().expect("driver missing");
if let Some(f) = &self.spawner.shared.before_park {
// Incorrect lint, the closures are actually different types so `f`
// cannot be passed as an argument to `enter`.
#[allow(clippy::redundant_closure)]
let (c, _) = self.enter(core, || f());
core = c;
}
// This check will fail if `before_park` spawns a task for us to run
// instead of parking the thread
if core.tasks.is_empty() {
// Park until the thread is signaled
core.stats.about_to_park();
core.stats.submit(&core.spawner.shared.stats);
let (c, _) = self.enter(core, || {
driver.park().expect("failed to park");
});
core = c;
core.stats.returned_from_park();
}
if let Some(f) = &self.spawner.shared.after_unpark {
// Incorrect lint, the closures are actually different types so `f`
// cannot be passed as an argument to `enter`.
#[allow(clippy::redundant_closure)]
let (c, _) = self.enter(core, || f());
core = c;
}
core.driver = Some(driver);
core
}
/// Checks the driver for new events without blocking the thread.
fn park_yield(&self, mut core: Box<Core>) -> Box<Core> {
let mut driver = core.driver.take().expect("driver missing");
core.stats.submit(&core.spawner.shared.stats);
let (mut core, _) = self.enter(core, || {
driver
.park_timeout(Duration::from_millis(0))
.expect("failed to park");
});
core.driver = Some(driver);
core
}
fn enter<R>(&self, core: Box<Core>, f: impl FnOnce() -> R) -> (Box<Core>, R) {
// Store the scheduler core in the thread-local context
//
// A drop-guard is employed at a higher level.
*self.core.borrow_mut() = Some(core);
// Execute the closure while tracking the execution budget
let ret = f();
// Take the scheduler core back
let core = self.core.borrow_mut().take().expect("core missing");
(core, ret)
}
}
impl Drop for BasicScheduler {
fn drop(&mut self) |
}
impl fmt::Debug for BasicScheduler {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BasicScheduler").finish()
}
}
// ===== impl Spawner =====
impl Spawner {
/// Spawns a future onto the basic scheduler
pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: crate::future::Future + Send + 'static,
F::Output: Send + 'static,
{
let (handle, notified) = self.shared.owned.bind(future, self.shared.clone());
if let Some(notified) = notified {
self.shared.schedule(notified);
}
handle
}
pub(crate) fn stats(&self) -> &RuntimeStats {
&self.shared.stats
}
fn pop(&self) -> Option<RemoteMsg> {
match self.shared.queue.lock().as_mut() {
Some(queue) => queue.pop_front(),
None => None,
}
}
fn waker_ref(&self) -> WakerRef<'_> {
// Set woken to true when enter block_on, ensure outer future
// be polled for the first time when enter loop
self.shared.woken.store(true, Release);
waker_ref(&self.shared)
}
// reset woken to false and return original value
pub(crate) fn reset_woken(&self) -> bool {
self.shared.woken.swap(false, AcqRel)
}
}
impl fmt::Debug for Spawner {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Spawner").finish()
}
}
// ===== impl Shared =====
impl Schedule for Arc<Shared> {
fn release(&self, task: &Task<Self>) -> Option<Task<Self>> {
self.owned.remove(task)
}
fn schedule(&self, task: task::Notified<Self>) {
CURRENT.with(|maybe_cx| match maybe_cx {
Some(cx) if Arc::ptr_eq(self, &cx.spawner.shared) => {
let mut core = cx.core.borrow_mut();
// If `None`, the runtime is shutting down, so there is no need
// to schedule the task.
if let Some(core) = core.as_mut() {
core.tasks.push_back(task);
}
}
_ => {
// If the queue is None, then the runtime has shut down. We
// don't need to do anything with the notification in that case.
let mut guard = self.queue.lock();
if let Some(queue) = guard.as_mut() {
queue.push_back(RemoteMsg::Schedule(task));
drop(guard);
self.unpark.unpark();
}
}
});
}
}
impl Wake for Shared {
fn wake(self: Arc<Self>) {
Wake::wake_by_ref(&self)
}
/// Wake by reference
fn wake_by_ref(arc_self: &Arc<Self>) {
arc_self.woken.store(true, Release);
arc_self.unpark.unpark();
}
}
// ===== CoreGuard =====
/// Used to ensure we always place the `Core` value back into its slot in
/// `BasicScheduler`, even if the future panics.
struct CoreGuard<'a> {
context: Context,
basic_scheduler: &'a BasicScheduler,
}
impl CoreGuard<'_> {
fn block_on<F: Future>(self, future: F) -> F::Output {
self.enter(|mut core, context| {
let _enter = crate::runtime::enter(false);
let waker = context.spawner.waker_ref();
let mut cx = std::task::Context::from_waker(&waker);
pin!(future);
'outer: loop {
if core.spawner.reset_woken() {
let (c, res) = context.run_task(core, || future.as_mut().poll(&mut cx));
core = c;
if let Ready(v) = res {
return (core, v);
}
}
for _ in 0..MAX_TASKS_PER_TICK {
// Get and increment the current tick
let tick = core.tick;
core.tick = core.tick.wrapping_add(1);
let entry = if tick % REMOTE_FIRST_INTERVAL == 0 {
core.spawner
.pop()
.or_else(|| core.tasks.pop_front().map(RemoteMsg::Schedule))
} else {
core.tasks
.pop_front()
.map(RemoteMsg::Schedule)
.or_else(|| core.spawner.pop())
};
let entry = match entry {
Some(entry) => entry,
None => {
core = context.park(core);
// Try polling the `block_on` future next
continue 'outer;
}
};
match entry {
RemoteMsg::Schedule(task) => {
let task = context.spawner.shared.owned.assert_owner(task);
let (c, _) = context.run_task(core, || {
task.run();
});
core = c;
}
}
}
// Yield to the driver, this drives the timer and pulls any
// pending I/O events.
core = context.park_yield(core);
}
})
}
/// Enters the scheduler context. This sets the queue and other necessary
/// scheduler state in the thread-local.
fn enter<F, R>(self, f: F) -> R
where
F: FnOnce(Box<Core>, &Context) -> (Box<Core>, R),
{
// Remove `core` from `context` to pass into the closure.
let core = self.context.core.borrow_mut().take().expect("core missing");
// Call the closure and place `core` back
let (core, ret) = CURRENT.set(&self.context, || f(core, &self.context));
*self.context.core.borrow_mut() = Some(core);
ret
}
}
impl Drop for CoreGuard<'_> {
fn drop(&mut self) {
if let Some(core) = self.context.core.borrow_mut().take() {
// Replace old scheduler back into the state to allow
// other threads to pick it up and drive it.
self.basic_scheduler.core.set(core);
// Wake up other possible threads that could steal the driver.
self.basic_scheduler.notify.notify_one()
}
}
}
| {
// Avoid a double panic if we are currently panicking and
// the lock may be poisoned.
let core = match self.take_core() {
Some(core) => core,
None if std::thread::panicking() => return,
None => panic!("Oh no! We never placed the Core back, this is a bug!"),
};
core.enter(|mut core, context| {
// Drain the OwnedTasks collection. This call also closes the
// collection, ensuring that no tasks are ever pushed after this
// call returns.
context.spawner.shared.owned.close_and_shutdown_all();
// Drain local queue
// We already shut down every task, so we just need to drop the task.
while let Some(task) = core.tasks.pop_front() {
drop(task);
}
// Drain remote queue and set it to None
let remote_queue = core.spawner.shared.queue.lock().take();
// Using `Option::take` to replace the shared queue with `None`.
// We already shut down every task, so we just need to drop the task.
if let Some(remote_queue) = remote_queue {
for entry in remote_queue {
match entry {
RemoteMsg::Schedule(task) => {
drop(task);
}
}
}
}
assert!(context.spawner.shared.owned.is_empty());
(core, ())
});
} |
dcim_devices_delete_parameters.go | // Code generated by go-swagger; DO NOT EDIT.
// Copyright 2020 The go-netbox Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package dcim
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewDcimDevicesDeleteParams creates a new DcimDevicesDeleteParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewDcimDevicesDeleteParams() *DcimDevicesDeleteParams {
return &DcimDevicesDeleteParams{ | }
// NewDcimDevicesDeleteParamsWithTimeout creates a new DcimDevicesDeleteParams object
// with the ability to set a timeout on a request.
func NewDcimDevicesDeleteParamsWithTimeout(timeout time.Duration) *DcimDevicesDeleteParams {
return &DcimDevicesDeleteParams{
timeout: timeout,
}
}
// NewDcimDevicesDeleteParamsWithContext creates a new DcimDevicesDeleteParams object
// with the ability to set a context for a request.
func NewDcimDevicesDeleteParamsWithContext(ctx context.Context) *DcimDevicesDeleteParams {
return &DcimDevicesDeleteParams{
Context: ctx,
}
}
// NewDcimDevicesDeleteParamsWithHTTPClient creates a new DcimDevicesDeleteParams object
// with the ability to set a custom HTTPClient for a request.
func NewDcimDevicesDeleteParamsWithHTTPClient(client *http.Client) *DcimDevicesDeleteParams {
return &DcimDevicesDeleteParams{
HTTPClient: client,
}
}
/* DcimDevicesDeleteParams contains all the parameters to send to the API endpoint
for the dcim devices delete operation.
Typically these are written to a http.Request.
*/
type DcimDevicesDeleteParams struct {
/* ID.
A unique integer value identifying this device.
*/
ID int64
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the dcim devices delete params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *DcimDevicesDeleteParams) WithDefaults() *DcimDevicesDeleteParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the dcim devices delete params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *DcimDevicesDeleteParams) SetDefaults() {
// no default values defined for this parameter
}
// WithTimeout adds the timeout to the dcim devices delete params
func (o *DcimDevicesDeleteParams) WithTimeout(timeout time.Duration) *DcimDevicesDeleteParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the dcim devices delete params
func (o *DcimDevicesDeleteParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the dcim devices delete params
func (o *DcimDevicesDeleteParams) WithContext(ctx context.Context) *DcimDevicesDeleteParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the dcim devices delete params
func (o *DcimDevicesDeleteParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the dcim devices delete params
func (o *DcimDevicesDeleteParams) WithHTTPClient(client *http.Client) *DcimDevicesDeleteParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the dcim devices delete params
func (o *DcimDevicesDeleteParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the dcim devices delete params
func (o *DcimDevicesDeleteParams) WithID(id int64) *DcimDevicesDeleteParams {
o.SetID(id)
return o
}
// SetID adds the id to the dcim devices delete params
func (o *DcimDevicesDeleteParams) SetID(id int64) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *DcimDevicesDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
} | timeout: cr.DefaultTimeout,
} |
Group.js | import _defineProperty from 'babel-runtime/helpers/defineProperty';
import * as React from 'react'; | var _props$prefixCls = props.prefixCls,
prefixCls = _props$prefixCls === undefined ? 'ant-input-group' : _props$prefixCls,
_props$className = props.className,
className = _props$className === undefined ? '' : _props$className;
var cls = classNames(prefixCls, (_classNames = {}, _defineProperty(_classNames, prefixCls + '-lg', props.size === 'large'), _defineProperty(_classNames, prefixCls + '-sm', props.size === 'small'), _defineProperty(_classNames, prefixCls + '-compact', props.compact), _classNames), className);
return React.createElement(
'span',
{ className: cls, style: props.style },
props.children
);
};
export default Group; | import classNames from 'classnames';
var Group = function Group(props) {
var _classNames;
|
hooks.js | import { useCallback } from 'react';
import {
shallowEqual,
useDispatch,
useSelector as useReduxSelector,
} from 'react-redux';
export const useAction = action => {
const dispatch = useDispatch();
return useCallback(prop => dispatch(action(prop)), [action, dispatch]); | };
export const useSelector = selector => useReduxSelector(selector, shallowEqual); |
|
spec_test.go | package kots
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_fileHasContent(t *testing.T) {
tests := []struct {
name string
file SpecFile
want bool
}{
{
name: "basic empty file",
file: SpecFile{
Name: "a.yaml",
Path: "a.yaml",
Content: "",
},
want: false,
},
{
name: "basic with content",
file: SpecFile{
Name: "a.yaml",
Path: "a.yaml",
Content: "key: value",
},
want: true,
},
{
name: "only spaces and comments",
file: SpecFile{
Name: "a.yaml",
Path: "a.yaml",
Content: `# comment
# another comment`,
},
want: false,
},
{
name: "empty but multi doc",
file: SpecFile{
Name: "a.yaml",
Path: "a.yaml",
Content: `# comment
# another comment
---
# another comment`,
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
hasContent := tt.file.hasContent()
assert.Equal(t, hasContent, tt.want)
})
}
}
func Test_unnestSpecFiles(t *testing.T) {
tests := []struct {
name string
files SpecFiles
want SpecFiles
}{
{
name: "basic",
files: SpecFiles{
{
Name: "a",
Path: "a",
Children: SpecFiles{
{
Name: "b",
Path: "a/b",
},
{
Name: "c",
Path: "a/c",
Children: SpecFiles{
{
Name: "d",
Path: "a/c/d",
},
{
Name: "e",
Path: "a/c/e",
},
},
},
},
},
{
Name: "b",
Path: "b",
Children: SpecFiles{
{
Name: "c",
Path: "b/c", | Children: SpecFiles{
{
Name: "d",
Path: "b/c/d",
},
},
},
},
},
},
want: SpecFiles{
{
Name: "b",
Path: "a/b",
},
{
Name: "d",
Path: "a/c/d",
},
{
Name: "e",
Path: "a/c/e",
},
{
Name: "d",
Path: "b/c/d",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
unnestedFiles := tt.files.unnest()
assert.ElementsMatch(t, unnestedFiles, tt.want)
})
}
}
func Test_separateSpecFiles(t *testing.T) {
tests := []struct {
name string
files SpecFiles
want SpecFiles
}{
{
name: "basic",
files: SpecFiles{
{
Name: "a.yaml",
Path: "a.yaml",
Content: "key0: value0",
},
{
Name: "b.yaml",
Path: "b.yaml",
Content: `key0: value0
---
key1: value1`,
},
{
Name: "c.yaml",
Path: "c.yaml",
Content: `---
key0: value0
---
key1: value1`,
},
{
Name: "d.yaml",
Path: "d.yaml",
Content: `---
key0: value0
---
key1: value1
---
key2: value2`,
},
{
Name: "e.yaml",
Path: "e.yaml",
Content: `---`,
},
{
Name: "f.yaml",
Path: "f.yaml",
Content: `# comment
---
# another comment`,
},
{
Name: "g.yaml",
Path: "g.yaml",
Content: `# comment
# another comment`,
},
{
Name: "h.yaml",
Path: "h.yaml",
Content: "",
},
{
Name: "i.yaml",
Path: "i.yaml",
Content: `key0: value0
---
# comment`,
},
{
Name: "j.yaml",
Path: "j.yaml",
Content: `---
# comment
---
# another comment`,
},
{
Name: "preserve-quotes.yaml",
Path: "preserve-quotes.yaml",
Content: `key: 'repl{{ConfigOption "my_config_option"}}'`,
},
},
want: SpecFiles{
{
Name: "a.yaml",
Path: "a.yaml",
Content: "key0: value0",
DocIndex: 0,
},
{
Name: "b.yaml",
Path: "b.yaml",
Content: "key0: value0",
DocIndex: 0,
},
{
Name: "b.yaml",
Path: "b.yaml",
Content: "key1: value1",
DocIndex: 1,
},
{
Name: "c.yaml",
Path: "c.yaml",
Content: "key0: value0",
DocIndex: 0,
},
{
Name: "c.yaml",
Path: "c.yaml",
Content: "key1: value1",
DocIndex: 1,
},
{
Name: "d.yaml",
Path: "d.yaml",
Content: "key0: value0",
DocIndex: 0,
},
{
Name: "d.yaml",
Path: "d.yaml",
Content: "key1: value1",
DocIndex: 1,
},
{
Name: "d.yaml",
Path: "d.yaml",
Content: "key2: value2",
DocIndex: 2,
},
{
Name: "i.yaml",
Path: "i.yaml",
Content: "key0: value0",
DocIndex: 0,
},
{
Name: "preserve-quotes.yaml",
Path: "preserve-quotes.yaml",
Content: `key: 'repl{{ConfigOption "my_config_option"}}'`,
DocIndex: 0,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
separatedFiles, err := tt.files.separate()
require.NoError(t, err)
assert.ElementsMatch(t, separatedFiles, tt.want)
})
}
} | |
standalone.py | # --coding:utf-8--
# Copyright (c) 2017 Intel Corporation
# | # You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import queue
import sys
from optparse import OptionParser
sys.path.append('../../')
from pyutilities.serve.options import Options
from pyutilities.serve.daemon import daemonize
from pyutilities.serve.core import PluginManager
VERSION = 1.0
def main():
parser = OptionParser(usage='usage: %prog [options] ...',
version='%%prog %s' % VERSION)
if os.name == 'posix':
parser.add_option('-d', '--daemonize', action='store_true',
dest='daemonize',
help='run in the background as a daemon')
parser.add_option('--pidfile', action='store',
dest='pidfile',
help='when daemonizing, file to which to write pid')
options, args = parser.parse_args()
    if getattr(options, 'daemonize', False) and getattr(options, 'autoreload', False):
parser.error('the --auto-reload option cannot be used with '
'--daemonize')
    if parser.has_option('--pidfile') and options.pidfile:
options.pidfile = os.path.abspath(options.pidfile)
q = queue.Queue()
opt = Options('default.conf')
manager = PluginManager(opt, q)
manager.run()
try:
if options.daemonize:
daemonize(pidfile=options.pidfile, progname='opcua_plugin')
except OSError as e:
print("%s: %s" % (e.__class__.__name__, e), file=sys.stderr)
sys.exit(1)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main() | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. |
DownloadVGG_Faces.py | import os
from PIL import Image
import urllib.request as ur
import urllib.request
from io import BytesIO
import requests
import csv
import h5py
import numpy as np
import argparse
def retrieve_patch( rec ):
|
def retrieve_celeb( filename ):
csvfile = open( filename, 'r')
reader = csv.reader(csvfile, delimiter=' ')
pts = []
for row in reader:
print( "image = ", row[0] )
if ( row[8] != '1' ):
continue
try:
pt = retrieve_patch( row )
pts.append( pt )
except IOError as e:
continue
return pts
#Parsing the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-folder",
help="folder for the HDF5 file and subfolder files")
args = parser.parse_args()
content_list = os.listdir( os.path.join( args.folder, "files") )
celebs = []
for celeb in content_list[0:100]:
print( "Celeb", celeb )
pts = retrieve_celeb( os.path.join( args.folder, "files", celeb ) )
celebs = celebs + pts
file = h5py.File( os.path.join( args.folder, "dataset.hdf5" ), 'w')
dset = file.create_dataset("/patches", data = celebs )
file.close()
| response = requests.get( rec[1], timeout=10 )
file = BytesIO( response.content )
img = Image.open( file )
ptch = img.crop( ( float(rec[2]),float(rec[3]),float(rec[4]), float(rec[5])) ).resize( (32,32) ).convert('L')
return np.asarray( ptch, dtype=np.uint8 ) |
lambda.ts | import { ApolloServer } from "apollo-server-lambda"
import { resolvers } from "./src/resolvers"
import { typeDefs } from "./src/typeDefs" | resolvers,
// @ts-ignore
context: ({ event, context }) => ({
headers: event.headers,
functionName: context.functionName,
event,
context,
}),
})
export const handler = server.createHandler() |
const server = new ApolloServer({
typeDefs, |
import_harborough.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000131"
addresses_name = (
"local.2019-05-02/Version 1/Democracy_Club__02May2019 Harborough DC.tsv"
)
stations_name = (
"local.2019-05-02/Version 1/Democracy_Club__02May2019 Harborough DC.tsv"
)
elections = ["local.2019-05-02"]
csv_delimiter = "\t"
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if uprn == "200003741884":
rec["postcode"] = "LE14 2QY"
| record.addressline1.strip() == "69 Main Street"
and record.addressline2.strip() == "Great Bowden"
and record.addressline3.strip() == "Market Harborough, Leics"
):
rec["postcode"] = "LE16 7HD"
rec["accept_suggestion"] = False
if uprn in [
"100030474314", # LE79DE -> LE79DP : Grange Barn, Loddington Road, Tilton on the Hill, Leicester
"100030474315", # LE79DE -> LE79DP : Grange Yard, Loddington Road, Tilton on the Hill, Leicester
"200003741317", # LE79DE -> LE79DP : Robin A Tiptoe Farm, Loddington Road, Tilton on the Hill, Leicester
"200003742237", # LE79XE -> LE79XB : Ash Tree Cottage, Launde Road, Loddington, Leicester
"100030477785", # LE96PU -> LE96PW : 102 Station Road, Broughton Astley, Leics
]:
rec["accept_suggestion"] = True
if uprn in [
"200003741417", # LE79YE -> LE79FN : Park Farm, Uppingham Road, Skeffington, Leicester
"200003737159", # LE175EA -> LE175RA : Hillcrest Farm, Frolesworth Road, Leire, Lutterworth, Leics
"200003737160", # LE175EA -> LE175RA : Mount Pleasant, Frolesworth Road, Leire, Lutterworth, Leics
"100032072508", # LE88AQ -> LE88AN : Wayside, Arnesby Road, Fleckney, Leicestershire
"100030493011", # LE167SZ -> LE167SX : The Old Rectory, Stonton Road, Church Langton, Market Harborough, Leics
"200003739029", # LE167RU -> LE167RT : Hunters Lodge, Main Street, Gumley, Market Harborough, Leics
"100030480043", # LE174RU -> LE174RX : Toll Gate Cottage, Bitteswell Road, Lutterworth, Leics
"10034458557", # LE175LE -> LE174LE : The Milking Parlour Boston Lodge, Lutterworth Road, Gilmorton, Lutterworth, Leics
"200003744797", # LE175PL -> LE175RZ : Ewe Cottage Gilmorton Lodge, Kimcote Road, Gilmorton, Lutterworth, Leics
"100030493741", # LE167TT -> LE167TX : Birchtree Farm, Welham Road, Thorpe Langton, Leics
"200003742100", # LE174LH -> LE174LR : The Mere, Mere Road, Bitteswell, Lutterworth, Leics
"200003741377", # LE79XL -> LE79XJ : 3 Fiddlers Green, Uppingham Road, East Norton, Leicester
"200003741379", # LE79XL -> LE79XJ : 2 Fiddlers Green, Uppingham Road, East Norton, Leicester
"200003741382", # LE79XL -> LE79XJ : 1 Fiddlers Green, Uppingham Road, East Norton, Leicester
]:
rec["accept_suggestion"] = False
return rec | if ( |
models.py | """ Code is generated by ucloud-model, DO NOT EDIT IT. """
from ucloud.core.typesystem import schema, fields
class ParamSchema(schema.ResponseSchema):
"""Param - 工作流参数"""
fields = {
"Name": fields.Str(required=False, load_from="Name"),
"Type": fields.Str(required=False, load_from="Type"),
"Value": fields.Str(required=False, load_from="Value"),
}
class ActivityTemplateSchema(schema.ResponseSchema):
"""ActivityTemplate - 工作流的Activity定义"""
fields = {
"Input": fields.Str(),
"Name": fields.Str(required=False, load_from="Name"),
"Next": fields.Str(required=False, load_from="Next"),
"Output": fields.List(fields.Str()),
"RetryTimes": fields.Str(required=False, load_from="RetryTimes"),
"Timeout": fields.Str(required=False, load_from="Timeout"),
"Type": fields.Str(required=False, load_from="Type"),
}
class WorkflowTemplateSchema(schema.ResponseSchema):
"""WorkflowTemplate - | Workflow对象定义"""
fields = {
"Activites": fields.List(ActivityTemplateSchema()),
"Input": fields.List(ParamSchema()),
"Output": fields.List(ParamSchema()),
}
|
|
EnvironmentHandler.ts | import RequestHandler from '../../request/RequestHandler';
export default class | extends RequestHandler
{
protected encodingKey: string;
protected decodingKey: string | undefined;
public constructor(encodingKey: string, decodingKey: string | undefined = undefined)
{
super();
this.encodingKey = encodingKey
this.decodingKey = decodingKey
}
}
| EnvironmentHandler |
mod.rs | use crate::topology::config::component::ComponentDescription;
use crate::Event;
use inventory;
pub mod check_fields;
pub mod is_log;
pub mod is_metric;
pub use check_fields::CheckFieldsConfig;
pub trait Condition: Send + Sync {
fn check(&self, e: &Event) -> bool;
/// Provides context for a failure. This is potentially mildly expensive if
/// it involves string building and so should be avoided in hot paths.
fn check_with_context(&self, e: &Event) -> Result<(), String> |
}
#[typetag::serde(tag = "type")]
pub trait ConditionConfig: std::fmt::Debug {
fn build(&self) -> crate::Result<Box<dyn Condition>>;
}
pub type ConditionDescription = ComponentDescription<Box<dyn ConditionConfig>>;
inventory::collect!(ConditionDescription);
| {
if self.check(e) {
Ok(())
} else {
Err("condition failed".into())
}
} |
test_utils_django.py | from unittest import mock
from unittest.mock import Mock
from bitcaster.utils.django import (activator_factory,
deactivator_factory, toggler_factory,)
def test_toggler_factory():
|
def test_activator_factory():
with mock.patch('bitcaster.utils.django.get_connection'):
func = activator_factory('test')
assert func(Mock(), Mock(), Mock())
def test_deactivator_factory():
with mock.patch('bitcaster.utils.django.get_connection'):
func = deactivator_factory('test')
assert func(Mock(), Mock(), Mock())
| with mock.patch('bitcaster.utils.django.get_connection'):
func = toggler_factory('test')
assert func(Mock(), Mock(), Mock()) |
FSANet_Train.py | import time
import sys
import argparse
import datetime
import pathlib
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
# Set seed
torch.manual_seed(0)
# Where to add a new import
from torch.optim.lr_scheduler import StepLR
# from torch.utils.tensorboard import SummaryWriter
# from torchsummary import summary
# from torchsummaryX import summary
import datasets
from HPEDA.FSANetDA import FSANet
from utils import AverageMeter
def parse_args():
|
def main():
# Parse Arguments
args = parse_args()
# get device GPU or CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load Dataset
if args.dataset == 'PoseSynthNPY':
pose_dataset = datasets.Pose_Synth_NPY(args.data_dir, args.filename_list)
elif args.dataset == 'Pose_Synth_Raw':
pose_dataset = datasets.Pose_Synth_Raw(args.data_dir, args.filename_list)
elif args.dataset == 'PoseSynRealRaw':
pose_dataset = datasets.Pose_Synth_Raw_RB(args.data_dir, args.filename_list)
elif args.dataset == 'Pose_BIWI_NPY':
pose_dataset = datasets.Pose_BIWI_NPY(args.data_dir, args.filename_list)
elif args.dataset == 'Pose_Synth_NPYDA':
pose_dataset = datasets.Pose_Synth_NPYDA(args.data_dir, args.filename_list)
else:
print('Error: not a valid dataset name')
sys.exit()
# hyper parameters & Model Params
num_capsule = 3
dim_capsule = 16
routings = 5
num_primcaps = 7 * 3
m_dim = 5
s_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]
model = FSANet(s_set).cuda()
print('Model created.')
# print(summary(model, torch.rand((1, 3, 64, 64)).cuda()))
# get multiple GPU
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model)
# load model to GPU
model.to(device)
# transfer learning
# modelPath = r'models/MySynthNPY_11-22-2020_21-41-04-n8857-e100-bs8-lr0.0001/weights.epoch89_model.pth'
# model.load_state_dict(torch.load(modelPath))
# Training parameters
optimizer = torch.optim.Adam(model.parameters(), args.lr)
batch_size = args.batch_size
# Load train loader
train_loader = torch.utils.data.DataLoader(dataset=pose_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=True,
num_workers=2)
# Loss
l1_criterion = nn.L1Loss()
nll_criterion = nn.NLLLoss() # domain adaptation
now = datetime.datetime.now() # current date and time
runID = args.output_string + now.strftime("_%m-%d-%Y_%H-%M-%S") \
+ '-n' + str(len(train_loader)) \
+ '-e' + str(args.epochs) \
+ '-bs' + str(batch_size) \
+ '-lr' + str(args.lr)
outputPath = './models/'
runPath = outputPath + runID
pathlib.Path(runPath).mkdir(parents=True, exist_ok=True)
# gamma = decaying factor (lr decayed on each step_size epoch with a rate of gamma
# scheduler = StepLR(optimizer, step_size=2, gamma=0.1)
# Start training...
for epoch in range(args.epochs):
batch_time = AverageMeter()
losses = AverageMeter()
N = len(train_loader)
# print('###################################### Epoch :', str(epoch))
# Switch to train mode
model.train()
end = time.time()
# Decay Learning Rate
# scheduler.step()
for i, (images, cont_labels) in enumerate(train_loader):
optimizer.zero_grad()
images = Variable(images).cuda()
label_angles = Variable(cont_labels[:, :3]).cuda(non_blocking=True)
# Predict
angles, _ = model(images, alpha=0.1)
# Compute the loss
l1_pose = l1_criterion(angles, label_angles)
loss = l1_pose
# Update step
losses.update(loss.data.item(), images.size(0))
loss.backward()
optimizer.step()
# Measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
eta = str(datetime.timedelta(seconds=int(batch_time.val * (N - i))))
# Log progress
if i % 5 == 0:
# Print to console
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t'
'ETA {eta}\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'L1_Loss: {l1_loss:.4f}'
.format(epoch, i, N,
batch_time=batch_time,
loss=losses,
eta=eta,
l1_loss=l1_pose
))
# save Model intermediate
path = runPath + '/weights.epoch{0}_model.pth'.format(epoch)
torch.save(model.cpu().state_dict(), path) # saving model
model.cuda()
# Start DA Training
# for epoch in range(args.epochs):
# batch_time = AverageMeter()
# losses = AverageMeter()
# N = len(train_loader)
#
# # print('###################################### Epoch :', str(epoch))
#
# # Switch to train mode
# model.train()
#
# end = time.time()
#
#
#
# for i, (images, cont_labels, biwiImages) in enumerate(train_loader):
#
# p = float(i + epoch * N) / args.epochs / N
# alpha = 2. / (1. + np.exp(-10 * p)) - 1
#
# optimizer.zero_grad()
#
# source_images = Variable(images).cuda()
# label_angles = Variable(cont_labels[:, :3]).cuda(non_blocking=True)
# source_domain_label = torch.zeros(batch_size)
# source_domain_label = source_domain_label.long().cuda()
#
# target_images = Variable(biwiImages).cuda()
# target_domain_label = torch.ones(batch_size)
# target_domain_label = target_domain_label.long().cuda()
#
# # Predict source domain
# angles, source_domain_output = model(source_images, alpha=alpha)
#
# # Compute the loss in source domain
# l1_pose = l1_criterion(angles, label_angles)
# nll_source = nll_criterion(source_domain_output, source_domain_label)
#
# # Predict target domain
# _, target_domain_output = model(target_images, alpha=alpha)
#
# # Compute the loss in target domain
# nll_target = nll_criterion(target_domain_output, target_domain_label)
#
# loss = 0.2*l1_pose + 1.5*nll_source + 1.5*nll_target
#
#
#
# # Update step
# losses.update(loss.data.item(), images.size(0))
# loss.backward()
# optimizer.step()
#
# # Measure elapsed time
# batch_time.update(time.time() - end)
# end = time.time()
# eta = str(datetime.timedelta(seconds=int(batch_time.val * (N - i))))
#
# # Log progress
# if i % 5 == 0:
# # Print to console
# print('Epoch: [{0}][{1}/{2}]\t'
# 'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t'
# 'ETA {eta}\t'
# 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
# 'L1_Loss: {l1_loss:.4f}'
# .format(epoch, i, N,
# batch_time=batch_time,
# loss=losses,
# eta=eta,
# l1_loss=l1_pose
# ))
#
# # save Model intermediate
# path = runPath + '/weights.epoch{0}_model.pth'.format(epoch)
# torch.save(model.cpu().state_dict(), path) # saving model
# model.cuda()
if __name__ == '__main__':
main()
| """Parse input arguments."""
parser = argparse.ArgumentParser(description='Monocular Head Pose Estimation from Synthetic Data')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--epochs', dest='epochs', help='Maximum number of training epochs.',
default=40, type=int)
parser.add_argument('--bs', dest='batch_size', help='Batch size.',
default=8, type=int)
parser.add_argument('--lr', dest='lr', help='Base learning rate.',
default=0.0001, type=float)
parser.add_argument("--validation_split", type=float, default=0.01,
help="validation split ratio")
parser.add_argument('--data_dir', dest='data_dir', help='Directory path for data.',
default='/mnt/fastssd/Shubhajit_Stuff/HPECode/Data/BIWI/',
type=str)
parser.add_argument('--filename_list', dest='filename_list',
help='Path to text file containing relative paths for every example.',
default='/mnt/fastssd/Shubhajit_Stuff/HPECode/Data/Mixed/RawNPY.txt',
type=str)
parser.add_argument('--output_string', dest='output_string', help='String appended to output snapshots.',
default='BIWIRaw', type=str)
# Pose_Synth_Raw | PoseSynthNPY
parser.add_argument('--dataset', dest='dataset', help='Dataset type.', default='Pose_BIWI_NPY', type=str)
args = parser.parse_args()
return args |
test_webauthn_credential.py | import base64
import pytest
from fido2 import cbor
from fido2.cose import ES256
from app.models.webauthn_credential import RegistrationError, WebAuthnCredential
# noqa adapted from https://github.com/duo-labs/py_webauthn/blob/90e3d97e0182899a35a70fc510280b4082cce19b/tests/test_webauthn.py#L14-L24
SESSION_STATE = {'challenge': 'bPzpX3hHQtsp9evyKYkaZtVc9UN07PUdJ22vZUdDp94', 'user_verification': 'discouraged'}
CLIENT_DATA_JSON = b'{"type": "webauthn.create", "clientExtensions": {}, "challenge": "bPzpX3hHQtsp9evyKYkaZtVc9UN07PUdJ22vZUdDp94", "origin": "https://webauthn.io"}' # noqa
# had to use the cbor2 library to re-encode the attestationObject due to implementation differences
ATTESTATION_OBJECT = base64.b64decode(b'o2NmbXRoZmlkby11MmZnYXR0U3RtdKJjc2lnWEgwRgIhAI1qbvWibQos/t3zsTU05IXw1Ek3SDApATok09uc4UBwAiEAv0fB/lgb5Ot3zJ691Vje6iQLAtLhJDiA8zDxaGjcE3hjeDVjgVkCUzCCAk8wggE3oAMCAQICBDxoKU0wDQYJKoZIhvcNAQELBQAwLjEsMCoGA1UEAxMjWXViaWNvIFUyRiBSb290IENBIFNlcmlhbCA0NTcyMDA2MzEwIBcNMTQwODAxMDAwMDAwWhgPMjA1MDA5MDQwMDAwMDBaMDExLzAtBgNVBAMMJll1YmljbyBVMkYgRUUgU2VyaWFsIDIzOTI1NzM0ODExMTE3OTAxMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEvd9nk9t3lMNQMXHtLE1FStlzZnUaSLql2fm1ajoggXlrTt8rzXuSehSTEPvEaEdv/FeSqX22L6Aoa8ajIAIOY6M7MDkwIgYJKwYBBAGCxAoCBBUxLjMuNi4xLjQuMS40MTQ4Mi4xLjUwEwYLKwYBBAGC5RwCAQEEBAMCBSAwDQYJKoZIhvcNAQELBQADggEBAKrADVEJfuwVpIazebzEg0D4Z9OXLs5qZ/ukcONgxkRZ8K04QtP/CB5x6olTlxsj+SXArQDCRzEYUgbws6kZKfuRt2a1P+EzUiqDWLjRILSr+3/o7yR7ZP/GpiFKwdm+czb94POoGD+TS1IYdfXj94mAr5cKWx4EKjh210uovu/pLdLjc8xkQciUrXzZpPR9rT2k/q9HkZhHU+NaCJzky+PTyDbq0KKnzqVhWtfkSBCGw3ezZkTS+5lrvOKbIa24lfeTgu7FST5OwTPCFn8HcfWZMXMSD/KNU+iBqJdAwTLPPDRoLLvPTl29weCAIh+HUpmBQd0UltcPOrA/LFvAf61oYXV0aERhdGFYwnSm6pITyZwvdLIkkrMgz0AmKpTBqVCgOX8pJQtghB7wQQAAAAAAAAAAAAAAAAAAAAAAAAAAAECKU1ppjl9gmhHWyDkgHsUvZmhr6oF3/lD3llzLE2SaOSgOGIsIuAQqgp8JQSUu3r/oOaP8RS44dlQjrH+ALfYtpAECAyYhWCAxnqAfESXOYjKUc2WACuXZ3ch0JHxV0VFrrTyjyjIHXCJYIFnx8H87L4bApR4M+hPcV+fHehEOeW+KCyd0H+WGY8s6') # noqa
# manually adapted by working out which character in the encoded CBOR corresponds to the public key algorithm ID
UNSUPPORTED_ATTESTATION_OBJECT = base64.b64decode(b'o2NmbXRoZmlkby11MmZnYXR0U3RtdKJjc2lnWEgwRgIhAI1qbvWibQos/t3zsTU05IXw1Ek3SDApATok09uc4UBwAiEAv0fB/lgb5Ot3zJ691Vje6iQLAtLhJDiA8zDxaGjcE3hjeDVjgVkCUzCCAk8wggE3oAMCAQICBDxoKU0wDQYJKoZIhvcNAQELBQAwLjEsMCoGA1UEAxMjWXViaWNvIFUyRiBSb290IENBIFNlcmlhbCA0NTcyMDA2MzEwIBcNMTQwODAxMDAwMDAwWhgPMjA1MDA5MDQwMDAwMDBaMDExLzAtBgNVBAMMJll1YmljbyBVMkYgRUUgU2VyaWFsIDIzOTI1NzM0ODExMTE3OTAxMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEvd9nk9t3lMNQMXHtLE1FStlzZnUaSLql2fm1ajoggXlrTt8rzXuSehSTEPvEaEdv/FeSqX22L6Aoa8ajIAIOY6M7MDkwIgYJKwYBBAGCxAoCBBUxLjMuNi4xLjQuMS40MTQ4Mi4xLjUwEwYLKwYBBAGC5RwCAQEEBAMCBSAwDQYJKoZIhvcNAQELBQADggEBAKrADVEJfuwVpIazebzEg0D4Z9OXLs5qZ/ukcONgxkRZ8K04QtP/CB5x6olTlxsj+SXArQDCRzEYUgbws6kZKfuRt2a1P+EzUiqDWLjRILSr+3/o7yR7ZP/GpiFKwdm+czb94POoGD+TS1IYdfXj94mAr5cKWx4EKjh210uovu/pLdLjc8xkQciUrXzZpPR9rT2k/q9HkZhHU+NaCJzky+PTyDbq0KKnzqVhWtfkSBCGw3ezZkTS+5lrvOKbIa24lfeTgu7FST5OwTPCFn8HcfWZMXMSD/KNU+iBqJdAwTLPPDRoLLvPTl29weCAIh+HUpmBQd0UltcPOrA/LFvAf61oYXV0aERhdGFYwnSm6pITyZwvdLIkkrMgz0AmKpTBqVCgOX8pJQtghB7wQQAAAAAAAAAAAAAAAAAAAAAAAAAAAECKU1ppjl9gmhHWyDkgHsUvZmhr6oF3/lD3llzLE2SaOSgOGIsIuAQqgp8JQSUu3r/oOaP8RS44dlQjrH+ALfYtpAECAyUhWCAxnqAfESXOYjKUc2WACuXZ3ch0JHxV0VFrrTyjyjIHXCJYIFnx8H87L4bApR4M+hPcV+fHehEOeW+KCyd0H+WGY8s6') # noqa
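# Editor's note (inferred, not verified byte-by-byte): in COSE, ES256 is algorithm -7, which
# CBOR-encodes as the byte 0x26; the "unsupported" blob above appears to differ from
# ATTESTATION_OBJECT only in that byte (0x26 -> 0x25, i.e. algorithm -6), so the fido2
# library should refuse to build a credential public key from it.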
def test_from_registration_verifies_response(webauthn_dev_server):
|
def test_from_registration_encodes_as_unicode(webauthn_dev_server):
registration_response = {
'clientDataJSON': CLIENT_DATA_JSON,
'attestationObject': ATTESTATION_OBJECT,
}
credential = WebAuthnCredential.from_registration(SESSION_STATE, registration_response)
serialized_credential = credential.serialize()
assert type(serialized_credential['credential_data']) == str
assert type(serialized_credential['registration_response']) == str
def test_from_registration_handles_library_errors(notify_admin):
registration_response = {
'clientDataJSON': CLIENT_DATA_JSON,
'attestationObject': ATTESTATION_OBJECT,
}
with pytest.raises(RegistrationError) as exc_info:
WebAuthnCredential.from_registration(SESSION_STATE, registration_response)
assert 'Invalid origin' in str(exc_info.value)
def test_from_registration_handles_unsupported_keys(webauthn_dev_server):
registration_response = {
'clientDataJSON': CLIENT_DATA_JSON,
'attestationObject': UNSUPPORTED_ATTESTATION_OBJECT,
}
with pytest.raises(RegistrationError) as exc_info:
WebAuthnCredential.from_registration(SESSION_STATE, registration_response)
assert 'Encryption algorithm not supported' in str(exc_info.value)
| registration_response = {
'clientDataJSON': CLIENT_DATA_JSON,
'attestationObject': ATTESTATION_OBJECT,
}
credential = WebAuthnCredential.from_registration(SESSION_STATE, registration_response)
assert credential.name == 'Unnamed key'
assert credential.registration_response == base64.b64encode(cbor.encode(registration_response)).decode('utf-8')
credential_data = credential.to_credential_data()
assert type(credential_data.credential_id) is bytes
assert type(credential_data.aaguid) is bytes
assert credential_data.public_key[3] == ES256.ALGORITHM |
search.js | // Search box: handle the Enter key
$("#fullTextSearch").keyup(function(e) {
if(e.keyCode == 13) {
if(fullTextSearchCheck()) {
fullTextSearch();
}
}
});
// When the search button is clicked
$("#fullTextSearchButton").click(function() {
if(fullTextSearchCheck()) {
fullTextSearch();
}
});
// Close button click
$('#districtSearchCloseButton').click(function() {
$('#districtSearchResultContent').hide();
});
// Map click
$('#magoContainer').click(function(e) {
if ($("#districtSearchResultContent").is(':visible')) {
$("#districtSearchResultContent").hide();
}
});
// Input validation
function fullTextSearchCheck() {
if($("#fullTextSearch").val() === null || $("#fullTextSearch").val().trim() === "") {
alert(JS_MESSAGE["search.enter.word"]);
$("#fullTextSearch").focus();
return false;
}
if($("#fullTextSearch").val().trim().length === 1) {
alert(JS_MESSAGE["search.required.word"]);
$("#fullTextSearch").focus();
return false;
}
if ($("#districtSelectContent").is(':visible')) {
$("#districtSelectContent").hide();
}
return true;
}
var fullTextSearchFlag = true;
function fullTextSearch() {
if(fullTextSearchFlag) {
fullTextSearchFlag = false;
//if($('#searchContent').css("display") ==='none') $(".search").click();
districtSearch(null);
} else {
alert(JS_MESSAGE["searching"]); | }
}
// Control the search menu when the search box is clicked
function showSearchMenu() {
$("#searchMenu").toggleClass("on");
$('#searchContent').toggle(true);
$('#contentsWrap').toggle(true);
// $("#newAddressSearchList").height(200);
}
// Administrative district search
function districtSearch(pageNo) {
event.stopPropagation();
	var info = "fullTextSearch=" + $("#fullTextSearch").val();
info += "&searchKey=newAddress";
if(pageNo !== null) {
info = info + "&pageNo=" + pageNo;
}
$.ajax({
url: "../searchmap/district",
type: "GET",
data: info,
dataType: "json",
success: function(msg){
if(msg.statusCode <= 200) {
msg.pagination.pageList = [];
var start = msg.pagination.startPage;
var end = msg.pagination.endPage;
for(i = start; i <= end; i++) {
msg.pagination.pageList.push(i);
}
				// Compile the Handlebars templates
var template = Handlebars.compile($("#districtSearchResultSource").html());
var pageTemplate = Handlebars.compile($("#districtPaginationSource").html());
$("#districtSearchResultDHTML").html("").append(template(msg))
$("#districtPaginationDHTML").html("").append(pageTemplate(msg));
$('#districtSearchResultContent').show();
fullTextSearchFlag = true;
} else {
alert(JS_MESSAGE[msg.errorCode]);
console.log("---- " + msg.message);
}
},
error:function(request,status,error) {
//console.log(" code : " + request.status + "\n" + ", message : " + request.responseText + "\n" + ", error : " + error);
alert(" code : " + request.status + "\n" + ", message : " + request.responseText + "\n" + ", error : " + error);
console.log("new address error ..... searchTypeCount = " + searchTypeCount);
}
});
} | return; |
getheaderresult.go | package jsonresult
import "github.com/incognitochain/incognito-chain/blockchain"
type GetHeaderResult struct {
BlockNum int `json:"blocknum"` | // Header blockchain.ShardBlock `json:"header"`
Header blockchain.ShardHeader `json:"header"`
} | ShardID byte `json:"shardID"`
BlockHash string `json:"blockhash"` |
mem.rs | use ppu::Ppu;
use apu::Apu;
use romMapper::RomMapper;
pub struct Ram {
pub val: [u8, ..0x800]
}
impl Ram {
pub fn new() -> Ram {
Ram{ val: [0, ..0x800] }
}
pub fn loadb(&self, addr: u16) -> u8 {
self.val[addr as uint & 0x7ff]
}
pub fn writeb(&mut self, addr: u16, val: u8) {
self.val[addr as uint & 0x7ff] = val
}
}
pub struct MemMapper {
pub ram: Ram,
pub ppu: Ppu,
pub apu: Apu,
pub romMapper: RomMapper,
}
impl MemMapper {
pub fn new(p: &Path) -> MemMapper {
MemMapper{
ram: Ram::new(),
ppu: Ppu::new(),
apu: Apu::new(),
romMapper: RomMapper::new(p),
}
}
// See the following link for details:
// http://wiki.nesdev.com/w/index.php/CPU_memory_map
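    // Summary of the ranges handled by loadb/writeb below: $0000-$1FFF is the 2 KB
    // internal RAM (mirrored every $0800, hence the `& 0x7ff` above), $2000-$3FFF are the
    // PPU registers, $4000-$4015 go to the APU, $4016-$401F are the controller/IO ports
    // (not hooked up yet), and $4020-$FFFF is cartridge space handled by the ROM mapper.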
pub fn loadb(&self, addr: u16) -> u8 {
if addr < 0x2000 {
self.ram.loadb(addr)
} else if addr < 0x4000 {
self.ppu.loadb(addr)
} else if addr < 0x4016 {
self.apu.loadb(addr)
} else if addr < 0x4020 {
panic!("HAVE NOT HOOKED UP INPUT YET")
} else {
self.romMapper.loadb(addr)
}
}
pub fn writeb(&mut self, addr: u16, val: u8) {
if addr < 0x2000 {
self.ram.writeb(addr, val)
} else if addr < 0x4000 {
self.ppu.writeb(addr, val)
} else if addr < 0x4016 {
self.apu.writeb(addr, val)
} else if addr < 0x4020 {
panic!("HAVE NOT HOOKED UP INPUT YET")
} else { | } | self.romMapper.writeb(addr, val)
}
} |
mod.rs | mod common;
mod lda_test;
mod branches_test;
mod flags_test; | mod decode_test;
mod pushpop_test;
mod cpu_test; |
|
doc_test.go | package logolang_test
import (
"fmt"
"github.com/Miguel-Dorta/logolang"
"os"
)
func ExampleLogger_color() {
log := logolang.NewLogger()
log.Level = logolang.LevelDebug
// Printed with color
log.Debug("debug test")
log.Info("info test")
log.Error("error test")
log.Critical("critical test")
log.Color = false
// Printed with no color
log.Debug("debug test")
log.Info("info test")
log.Error("error test")
log.Critical("critical test")
}
func | () {
log := logolang.NewLogger()
log.Level = logolang.LevelDebug
// Set Formatter to be "LEVEL: MESSAGE"
log.Formatter = func(levelName, msg string) string {
return fmt.Sprintf("%s: %s", levelName, msg)
}
log.Debug("debug test")
log.Info("info test")
log.Error("error test")
log.Critical("critical test")
}
func ExampleLogger_level() {
log := logolang.NewLogger()
log.Level = logolang.LevelInfo // Print info or lower
log.Debug("debug test") // Will not be printed
log.Info("info test") // Will be printed
log.Error("error test") // Will be printed
log.Critical("critical test") // Will be printed
}
func ExampleNewLoggerWriters() {
// Create output files
outFile, err := os.Create("/tmp/out.log")
if err != nil {
panic(err)
}
defer outFile.Close()
errFile, err := os.Create("/tmp/err.log")
if err != nil {
panic(err)
}
defer errFile.Close()
// Create safe writers
outSafe := &logolang.SafeWriter{W: outFile}
errSafe := &logolang.SafeWriter{W: errFile}
// Assign safe writers to new Logger
log := logolang.NewLoggerWriters(outSafe, outSafe, errSafe, errSafe)
log.Color = false // Disable color (read documentation)
log.Level = logolang.LevelDebug
log.Debug("debug test")
log.Info("info test")
log.Error("error test")
log.Critical("critical test")
}
func ExampleSafeWriter() {
// Create output files
outFile, err := os.Create("/tmp/out.log")
if err != nil {
panic(err)
}
defer outFile.Close()
errFile, err := os.Create("/tmp/err.log")
if err != nil {
panic(err)
}
defer errFile.Close()
// Create safe writers
outSafe := &logolang.SafeWriter{W: outFile}
errSafe := &logolang.SafeWriter{W: errFile}
// Assign safe writers to new Logger
log := logolang.NewLoggerWriters(outSafe, outSafe, errSafe, errSafe)
log.Color = false // Disable color (read documentation)
log.Level = logolang.LevelDebug
log.Debug("debug test")
log.Info("info test")
log.Error("error test")
log.Critical("critical test")
}
| ExampleLogger_formatter |
saml.ts | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { schema } from '@kbn/config-schema';
import { SAMLLoginStep } from '../../authentication';
import { createCustomResourceResponse } from '.';
import { RouteDefinitionParams } from '..';
/**
* Defines routes required for SAML authentication.
*/
export function defineSAMLRoutes({
router,
logger,
authc,
getLegacyAPI,
basePath,
}: RouteDefinitionParams) {
router.get(
{
path: '/api/security/saml/capture-url-fragment',
validate: false, | },
(context, request, response) => {
// We're also preventing `favicon.ico` request since it can cause new SAML handshake.
return response.custom(
createCustomResourceResponse(
`
<!DOCTYPE html>
<title>Kibana SAML Login</title>
<link rel="icon" href="data:,">
<script src="${basePath.serverBasePath}/api/security/saml/capture-url-fragment.js"></script>
`,
'text/html',
getLegacyAPI().cspRules
)
);
}
);
router.get(
{
path: '/api/security/saml/capture-url-fragment.js',
validate: false,
options: { authRequired: false },
},
(context, request, response) => {
return response.custom(
createCustomResourceResponse(
`
window.location.replace(
'${basePath.serverBasePath}/api/security/saml/start?redirectURLFragment=' + encodeURIComponent(window.location.hash)
);
`,
'text/javascript',
getLegacyAPI().cspRules
)
);
}
);
router.get(
{
path: '/api/security/saml/start',
validate: { query: schema.object({ redirectURLFragment: schema.string() }) },
options: { authRequired: false },
},
async (context, request, response) => {
try {
const authenticationResult = await authc.login(request, {
provider: 'saml',
value: {
step: SAMLLoginStep.RedirectURLFragmentCaptured,
redirectURLFragment: request.query.redirectURLFragment,
},
});
// When authenticating using SAML we _expect_ to redirect to the SAML Identity provider.
if (authenticationResult.redirected()) {
return response.redirected({ headers: { location: authenticationResult.redirectURL! } });
}
return response.unauthorized();
} catch (err) {
logger.error(err);
return response.internalError();
}
}
);
router.post(
{
path: '/api/security/saml/callback',
validate: {
body: schema.object({
SAMLResponse: schema.string(),
RelayState: schema.maybe(schema.string()),
}),
},
options: { authRequired: false },
},
async (context, request, response) => {
try {
// When authenticating using SAML we _expect_ to redirect to the SAML Identity provider.
const authenticationResult = await authc.login(request, {
provider: 'saml',
value: {
step: SAMLLoginStep.SAMLResponseReceived,
samlResponse: request.body.SAMLResponse,
},
});
if (authenticationResult.redirected()) {
return response.redirected({
headers: { location: authenticationResult.redirectURL! },
});
}
return response.unauthorized({ body: authenticationResult.error });
} catch (err) {
logger.error(err);
return response.internalError();
}
}
);
} | options: { authRequired: false }, |
tests.py | from django.contrib.auth.models import User
from django.urls import reverse_lazy
from .models import FakeNews
from ..utils.base_test import AuthenticationTestTemplate
class FakeNewsListTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.get
def _get_basename_url(self):
return "fakenews-list"
def _get_kwargs_url(self):
return {}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_list_10_obj_paginated_token(self):
fakenews = []
user = self.create_normal_user("author")
for i in range(0, 11):
fakenews.append(
FakeNews(
author=user,
title=f"test create fakenews title{i}",
subtitle=f"test create fakenews subtitle{i}",
body=f"test create fakenews body{i}",
)
)
FakeNews.objects.bulk_create(fakenews)
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._make_request()
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data.get("results")), 10)
self.assertEqual(resp.data.get("count"), 11)
self.assertIsNotNone(resp.data.get("next"))
class FakeNewsDetailTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.get
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission.""" | pass
def test_detail_obj_token(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.get(reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}))
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), fakenews.title)
def test_detail_not_found(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._client.get(reverse_lazy("fakenews-detail", kwargs={"slug": "slug-not-found"}))
self.assertEqual(resp.status_code, 404)
class FakeNewsCreateTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.post
def _get_basename_url(self):
return "fakenews-list"
def _get_kwargs_url(self):
return {}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_create_successful(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._client.post(
reverse_lazy("fakenews-list"),
data={
"title": "test create fakenews title",
"subtitle": "test create fakenews subtitle",
"body": "test create fakenews body",
},
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(FakeNews.objects.last().slug, resp.data.get("slug"))
def test_create_already_exists(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.post(
reverse_lazy("fakenews-list"),
data={
"title": "test create fakenews title",
"subtitle": "test create fakenews subtitle",
"body": "test create fakenews body",
},
)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.data.get("title").pop(0), "fake news with this title already exists.")
def test_create_without_fields_required(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._make_request()
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.data.get("title").pop(0), "This field is required.")
class FakeNewsDeleteTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.delete
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_delete_successful(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.delete(reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}))
self.assertEqual(resp.status_code, 204)
class FakeNewsPatchTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.patch
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_patch_normal_user(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.patch(
reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}),
data={"title": "title updated", "subtitle": "subtitle updated", "body": "body updated"},
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), "title updated")
self.assertEqual(resp.data.get("slug"), "title-updated")
self.assertEqual(FakeNews.objects.last().slug, "title-updated")
class FakeNewsUpdateTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.put
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_update_normal_user(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.put(
reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}),
data={"title": "title updated", "subtitle": "subtitle updated", "body": "body updated"},
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), "title updated")
self.assertEqual(resp.data.get("slug"), "title-updated")
self.assertEqual(FakeNews.objects.last().slug, "title-updated")
# way to turn a test case class into an abstract
del AuthenticationTestTemplate | |
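# (Editor's note) Deleting the imported name keeps unittest discovery from collecting the
# template class from this module and running its incomplete tests; the subclasses above
# keep their own references to it, so inheritance is unaffected.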
photoImposer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from photoCoordinates import photoCoordinates
from PIL import Image, ImageFont, ImageDraw
class photoImposer:
pc = photoCoordinates()
allCoordinates = pc.allCoordinates
all_images = {}
def | (self):
self.all_images['KICKOFF'] = "formationImages/kickoff_coverage.png"
self.all_images['KICKOFF_RETURN'] = "formationImages/kickoff_return.png"
self.all_images['PUNT'] = "formationImages/punt_coverage.png"
self.all_images['PUNT_RETURN'] = "formationImages/punt_return.png"
self.all_images['FIELDGOAL'] = "formationImages/fieldgoal_coverage.png"
self.all_images['FIELDGOAL_BLOCK'] = "formationImages/fieldgoal_block.png"
def imposeDataOnImage(self, playType, countsAndRatingsData, downloadPath):
coordinates = self.allCoordinates.get(playType)
image = Image.open(self.all_images.get(playType))
font = ImageFont.truetype('arial_bold.ttf', size=13)
draw = ImageDraw.Draw(image)
for position, positional_group in countsAndRatingsData.groupby(['POSITION']):
(x, y) = (0, 0)
if position in coordinates:
(x, y) = coordinates.get(position)
message = ''
for index, player in positional_group.iterrows():
message = message + str(player["PLAYER"]) + " " + str(player["COUNT"]) + " " + str(player["RATING"]) + '\n'
color = 'rgb(0, 0, 0)'
draw.text((x, y), message, fill=color, font=font)
imagename = './' + downloadPath + '/' + playType + '_ANALYSIS.png'
image.save(imagename)
| __init__ |
test_dataset_geo.py | from copy import deepcopy
from functools import partial
import json
from unittest import TestCase
import responses
from tamr_unify_client import Client
from tamr_unify_client.auth import UsernamePasswordAuth
from tamr_unify_client.models.dataset.resource import Dataset
class | (TestCase):
def setUp(self):
auth = UsernamePasswordAuth("username", "password")
self.unify = Client(auth)
def test_record_to_feature(self):
empty_record = {"id": "1"}
def key_value_single(rec):
return rec["id"]
actual = Dataset._record_to_feature(
empty_record, key_value_single, ["id"], "geom"
)
expected = {"type": "Feature", "id": "1"}
self.assertEqual(expected, actual)
record_with_point = {"id": "1", "geom": {"point": [1, 1]}}
actual = Dataset._record_to_feature(
record_with_point, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {"type": "Point", "coordinates": [1, 1]},
}
self.assertEqual(expected, actual)
record_with_multi_point = {"id": "1", "geom": {"multiPoint": [[1, 1]]}}
actual = Dataset._record_to_feature(
record_with_multi_point, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {"type": "MultiPoint", "coordinates": [[1, 1]]},
}
self.assertEqual(expected, actual)
record_with_line = {"id": "1", "geom": {"lineString": [[1, 1], [2, 2]]}}
actual = Dataset._record_to_feature(
record_with_line, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {"type": "LineString", "coordinates": [[1, 1], [2, 2]]},
}
self.assertEqual(expected, actual)
record_with_multi_line = {
"id": "1",
"geom": {"multiLineString": [[[1, 1], [2, 2]]]},
}
actual = Dataset._record_to_feature(
record_with_multi_line, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {"type": "MultiLineString", "coordinates": [[[1, 1], [2, 2]]]},
}
self.assertEqual(expected, actual)
record_with_polygon = {
"id": "1",
"geom": {"polygon": [[[1, 1], [2, 2], [3, 3]]]},
}
actual = Dataset._record_to_feature(
record_with_polygon, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {"type": "Polygon", "coordinates": [[[1, 1], [2, 2], [3, 3]]]},
}
self.assertEqual(expected, actual)
record_with_multi_polygon = {
"id": "1",
"geom": {"multiPolygon": [[[[1, 1], [2, 2], [3, 3]]]]},
}
actual = Dataset._record_to_feature(
record_with_multi_polygon, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {
"type": "MultiPolygon",
"coordinates": [[[[1, 1], [2, 2], [3, 3]]]],
},
}
self.assertEqual(expected, actual)
record_with_full_geo = {
"id": "1",
"geom": {
"point": None,
"multiPoint": None,
"lineString": None,
"multiLineString": None,
"polygon": None,
"multiPolygon": [[[[1, 1], [2, 2], [3, 3]]]],
},
}
actual = Dataset._record_to_feature(
record_with_full_geo, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"geometry": {
"type": "MultiPolygon",
"coordinates": [[[[1, 1], [2, 2], [3, 3]]]],
},
}
self.assertEqual(expected, actual)
record_with_null_geo = {
"id": "1",
"geom": {
"point": None,
"multiPoint": None,
"lineString": None,
"multiLineString": None,
"polygon": None,
"multiPolygon": None,
},
}
actual = Dataset._record_to_feature(
record_with_null_geo, key_value_single, ["id"], "geom"
)
expected = {"type": "Feature", "id": "1"}
self.assertEqual(expected, actual)
record_with_bbox = {"id": "1", "bbox": [[0, 0], [1, 1]]}
actual = Dataset._record_to_feature(
record_with_bbox, key_value_single, ["id"], "geom"
)
expected = {"type": "Feature", "id": "1", "bbox": [[0, 0], [1, 1]]}
self.assertEqual(expected, actual)
record_with_props = {"id": "1", "p1": "v1", "p2": "v2"}
actual = Dataset._record_to_feature(
record_with_props, key_value_single, ["id"], "geom"
)
expected = {
"type": "Feature",
"id": "1",
"properties": {"p1": "v1", "p2": "v2"},
}
self.assertEqual(expected, actual)
def key_value_composite(rec):
return [rec[v] for v in ["id1", "id2"]]
record_with_composite_key = {"id1": "1", "id2": "2"}
actual = Dataset._record_to_feature(
record_with_composite_key, key_value_composite, ["id1", "id2"], "geom"
)
expected = {"type": "Feature", "id": ["1", "2"]}
self.assertEqual(expected, actual)
record_with_everything = {
"id1": "1",
"id2": "2",
"bbox": [[0, 0], [1, 1]],
"name": "record with everything",
"geom": {
"point": None,
"multiPoint": None,
"lineString": None,
"multiLineString": None,
"polygon": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]],
"multiPolygon": None,
},
"alternate_geom": {
"point": [1, 1],
"multiPoint": None,
"lineString": None,
"multiLineString": None,
"polygon": None,
"multiPolygon": None,
},
}
actual = Dataset._record_to_feature(
record_with_everything, key_value_composite, ["id1", "id2"], "geom"
)
expected = {
"type": "Feature",
"id": ["1", "2"],
"bbox": [[0, 0], [1, 1]],
"properties": {
"name": "record with everything",
"alternate_geom": {
"point": [1, 1],
"multiPoint": None,
"lineString": None,
"multiLineString": None,
"polygon": None,
"multiPolygon": None,
},
},
"geometry": {
"type": "Polygon",
"coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]],
},
}
self.assertEqual(expected, actual)
record_without_geo = {"id": "1", "prop1": "val1"}
actual = Dataset._record_to_feature(
record_without_geo, key_value_single, ["id"], None
)
expected = {"type": "Feature", "id": "1", "properties": {"prop1": "val1"}}
self.assertEqual(expected, actual)
@responses.activate
def test_geo_features(self):
dataset_url = f"http://localhost:9100/api/versioned/v1/datasets/1"
responses.add(responses.GET, dataset_url, json=self._dataset_json)
attributes_url = f"{dataset_url}/attributes"
responses.add(responses.GET, attributes_url, json=self._attributes_json)
records_url = f"{dataset_url}/records"
responses.add(
responses.GET,
records_url,
body="\n".join([json.dumps(rec) for rec in self._records_json]),
)
dataset = self.unify.datasets.by_resource_id("1")
features = [feature for feature in dataset.itergeofeatures()]
self.assertEqual(6, len(features))
self.assertSetEqual(
{
"point",
"multiPoint",
"lineString",
"multiLineString",
"polygon",
"multiPolygon",
},
{feature["id"] for feature in features},
)
@responses.activate
def test_geo_interface(self):
dataset_url = f"http://localhost:9100/api/versioned/v1/datasets/1"
responses.add(responses.GET, dataset_url, json=self._dataset_json)
attributes_url = f"{dataset_url}/attributes"
responses.add(responses.GET, attributes_url, json=self._attributes_json)
records_url = f"{dataset_url}/records"
responses.add(
responses.GET,
records_url,
body="\n".join([json.dumps(rec) for rec in self._records_json]),
)
dataset = self.unify.datasets.by_resource_id("1")
fc = dataset.__geo_interface__
self.assertEqual("FeatureCollection", fc["type"])
self.assertSetEqual(
{
"point",
"multiPoint",
"lineString",
"multiLineString",
"polygon",
"multiPolygon",
},
{feature["id"] for feature in fc["features"]},
)
def test_feature_to_record(self):
feature = {"type": "Feature", "id": "1"}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1"}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {"type": "MultiPoint", "coordinates": [[0, 0], [1, 1]]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"multiPoint": [[0, 0], [1, 1]]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {"type": "LineString", "coordinates": [[0, 0], [1, 1]]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"lineString": [[0, 0], [1, 1]]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {
"type": "MultiLineString",
"coordinates": [[[0, 0], [1, 1], [2, 2]]],
},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"multiLineString": [[[0, 0], [1, 1], [2, 2]]]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {"type": "Polygon", "coordinates": [[[0, 0], [1, 1], [2, 2]]]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"polygon": [[[0, 0], [1, 1], [2, 2]]]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"geometry": {
"type": "MultiPolygon",
"coordinates": [[[[0, 0], [1, 1], [2, 2]]]],
},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"multiPolygon": [[[[0, 0], [1, 1], [2, 2]]]]}}
self.assertEqual(expected, actual)
feature = {"type": "Feature", "id": "1", "geometry": None}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1"}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"bbox": [0, 0, 1, 1],
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}, "bbox": [0, 0, 1, 1]}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"bbox": None,
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"bbox": [0, 0, 1, 1],
"geometry": {"type": "Point", "coordinates": [0, 0]},
"properties": {"prop1": "val1", "prop2": "val2"},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {
"pk": "1",
"geo": {"point": [0, 0]},
"bbox": [0, 0, 1, 1],
"prop1": "val1",
"prop2": "val2",
}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"bbox": [0, 0, 1, 1],
"geometry": {"type": "Point", "coordinates": [0, 0]},
"properties": None,
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}, "bbox": [0, 0, 1, 1]}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": "1",
"bbox": [0, 0, 1, 1],
"geometry": {"type": "Point", "coordinates": [0, 0]},
# Properties with names that conflict with
# the props in the key or geometry
# get ignored
"properties": {"pk": "val1", "geo": "val2", "bbox": "val3"},
}
actual = Dataset._feature_to_record(feature, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}, "bbox": [0, 0, 1, 1]}
self.assertEqual(expected, actual)
feature = {
"type": "Feature",
"id": ["1", "2"],
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
actual = Dataset._feature_to_record(feature, ["pk1", "pk2"], "geo")
expected = {"pk1": "1", "pk2": "2", "geo": {"point": [0, 0]}}
self.assertEqual(expected, actual)
class NotAFeature:
@property
def __geo_interface__(self):
return {
"type": "Feature",
"id": "1",
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
naf = NotAFeature()
actual = Dataset._feature_to_record(naf, ["pk"], "geo")
expected = {"pk": "1", "geo": {"point": [0, 0]}}
self.assertEqual(expected, actual)
@responses.activate
def test_from_geo_features(self):
def update_callback(request, snoop):
snoop["payload"] = request.body
return 200, {}, "{}"
dataset_url = f"http://localhost:9100/api/versioned/v1/datasets/1"
responses.add(responses.GET, dataset_url, json=self._dataset_json)
attributes_url = f"{dataset_url}/attributes"
responses.add(responses.GET, attributes_url, json=self._attributes_json)
records_url = f"{dataset_url}:updateRecords"
snoop = {}
responses.add_callback(
responses.POST, records_url, callback=partial(update_callback, snoop=snoop)
)
dataset = self.unify.datasets.by_resource_id("1")
features = [
{"id": "1", "geometry": {"type": "Point", "coordinates": [0, 0]}},
{"id": "2", "geometry": {"type": "Point", "coordinates": [1, 1]}},
]
dataset.from_geo_features(features)
updates = [
{
"action": "CREATE",
"recordId": "1",
"record": {"geom": {"point": [0, 0]}, "id": "1"},
},
{
"action": "CREATE",
"recordId": "2",
"record": {"geom": {"point": [1, 1]}, "id": "2"},
},
]
expected = updates
actual = [json.loads(item) for item in snoop["payload"]]
self.assertEqual(expected, actual)
class NotAFeatureCollection:
@property
def __geo_interface__(self):
return {"type": "FeatureCollection", "features": features}
snoop["payload"] = None
nafc = NotAFeatureCollection()
dataset.from_geo_features(nafc)
actual = [json.loads(item) for item in snoop["payload"]]
self.assertEqual(expected, actual)
@responses.activate
def test_from_geo_features_composite_key(self):
def update_callback(request, snoop):
snoop["payload"] = request.body
return 200, {}, "{}"
composite_key_dataset_json = deepcopy(self._dataset_json)
composite_key_dataset_json["keyAttributeNames"] = ["id1", "id2"]
dataset_url = f"http://localhost:9100/api/versioned/v1/datasets/1"
responses.add(responses.GET, dataset_url, json=composite_key_dataset_json)
composite_key_attributes_json = deepcopy(self._attributes_json)
composite_key_attributes_json[0]["name"] = "id1"
composite_key_attributes_json.insert(
1, deepcopy(composite_key_attributes_json[0])
)
composite_key_attributes_json[1]["name"] = "id2"
attributes_url = f"{dataset_url}/attributes"
responses.add(responses.GET, attributes_url, json=composite_key_attributes_json)
records_url = f"{dataset_url}:updateRecords"
snoop = {}
responses.add_callback(
responses.POST, records_url, callback=partial(update_callback, snoop=snoop)
)
dataset = self.unify.datasets.by_resource_id("1")
features = [
{"id": ["1", "a"], "geometry": {"type": "Point", "coordinates": [0, 0]}},
{"id": ["2", "b"], "geometry": {"type": "Point", "coordinates": [1, 1]}},
]
dataset.from_geo_features(features)
updates = [
{
"action": "CREATE",
"compositeRecordId": ["1", "a"],
"record": {"geom": {"point": [0, 0]}, "id1": "1", "id2": "a"},
},
{
"action": "CREATE",
"compositeRecordId": ["2", "b"],
"record": {"geom": {"point": [1, 1]}, "id1": "2", "id2": "b"},
},
]
expected = updates
actual = [json.loads(item) for item in snoop["payload"]]
self.assertEqual(expected, actual)
_dataset_json = {
"id": "unify://unified-data/v1/datasets/1",
"externalId": "number 1",
"name": "dataset 1 name",
"description": "dataset 1 description",
"version": "dataset 1 version",
"keyAttributeNames": ["id"],
"tags": [],
"created": {
"username": "admin",
"time": "2018-09-10T16:06:20.636Z",
"version": "dataset 1 created version",
},
"lastModified": {
"username": "admin",
"time": "2018-09-10T16:06:20.851Z",
"version": "dataset 1 modified version",
},
"relativeId": "datasets/1",
"upstreamDatasetIds": [],
}
_attributes_json = [
{
"name": "id",
"description": "primary key",
"type": {"baseType": "STRING", "attributes": []},
"isNullable": False,
},
{
"name": "geom",
"description": "Geospatial geometry",
"type": {
"baseType": "RECORD",
"attributes": [
{
"name": "point",
"type": {
"baseType": "ARRAY",
"innerType": {"baseType": "DOUBLE", "attributes": []},
"attributes": [],
},
"isNullable": True,
},
{
"name": "multiPoint",
"type": {
"baseType": "ARRAY",
"innerType": {
"baseType": "ARRAY",
"innerType": {"baseType": "DOUBLE", "attributes": []},
"attributes": [],
},
"attributes": [],
},
"isNullable": True,
},
{
"name": "lineString",
"type": {
"baseType": "ARRAY",
"innerType": {
"baseType": "ARRAY",
"innerType": {"baseType": "DOUBLE", "attributes": []},
"attributes": [],
},
"attributes": [],
},
"isNullable": True,
},
{
"name": "multiLineString",
"type": {
"baseType": "ARRAY",
"innerType": {
"baseType": "ARRAY",
"innerType": {
"baseType": "ARRAY",
"innerType": {
"baseType": "DOUBLE",
"attributes": [],
},
"attributes": [],
},
"attributes": [],
},
"attributes": [],
},
"isNullable": True,
},
{
"name": "polygon",
"type": {
"baseType": "ARRAY",
"innerType": {
"baseType": "ARRAY",
"innerType": {
"baseType": "ARRAY",
"innerType": {
"baseType": "DOUBLE",
"attributes": [],
},
"attributes": [],
},
"attributes": [],
},
"attributes": [],
},
"isNullable": True,
},
{
"name": "multiPolygon",
"type": {
"baseType": "ARRAY",
"innerType": {
"baseType": "ARRAY",
"innerType": {
"baseType": "ARRAY",
"innerType": {
"baseType": "ARRAY",
"innerType": {
"baseType": "DOUBLE",
"attributes": [],
},
"attributes": [],
},
"attributes": [],
},
"attributes": [],
},
"attributes": [],
},
"isNullable": True,
},
],
},
"isNullable": False,
},
]
_records_json = [
{"id": "point", "geom": {"point": [1, 1]}},
{"id": "multiPoint", "geom": {"multiPoint": [[1, 1], [2, 2]]}},
{"id": "lineString", "geom": {"lineString": [[1, 1], [2, 2]]}},
{
"id": "multiLineString",
"geom": {"multiLineString": [[[1, 1], [2, 2]], [[3, 3], [4, 4]]]},
},
{"id": "polygon", "geom": {"polygon": [[[1, 1], [2, 2], [3, 3], [1, 1]]]}},
{
"id": "multiPolygon",
"geom": {
"multiPolygon": [
[[[1, 1], [2, 2], [3, 3], [1, 1]]],
[[[4, 4], [5, 5], [6, 6], [4, 4]]],
]
},
},
]
| TestDatasetGeo |
profile.py | from collections import namedtuple
from itertools import starmap
from multiprocessing import Pipe, Process, current_process
from time import sleep
from timeit import default_timer
from ..callbacks import Callback
from ..utils import import_required
# Stores execution data for each task
TaskData = namedtuple(
"TaskData", ("key", "task", "start_time", "end_time", "worker_id")
)
class Profiler(Callback):
"""A profiler for dask execution at the task level.
Records the following information for each task:
1. Key
2. Task
3. Start time in seconds since the epoch
4. Finish time in seconds since the epoch
5. Worker id
Examples
--------
>>> from operator import add, mul
>>> from dask.threaded import get
>>> from dask.diagnostics import Profiler
>>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> with Profiler() as prof:
... get(dsk, 'z')
22
>>> prof.results # doctest: +SKIP
[TaskData(key='y', task=(add, 'x', 10), start_time=..., end_time=..., worker_id=...),
TaskData(key='z', task=(mul, 'y', 2), start_time=..., end_time=..., worker_id=...)]
These results can be visualized in a bokeh plot using the ``visualize``
method. Note that this requires bokeh to be installed.
>>> prof.visualize() # doctest: +SKIP
You can activate the profiler globally
>>> prof.register()
If you use the profiler globally you will need to clear out old results
manually.
>>> prof.clear()
"""
def __init__(self):
self._results = {}
self.results = []
self._dsk = {}
def __enter__(self):
self.clear()
return super().__enter__()
def _start(self, dsk):
self._dsk.update(dsk)
def _pretask(self, key, dsk, state):
start = default_timer()
self._results[key] = (key, dsk[key], start)
def _posttask(self, key, value, dsk, state, id):
end = default_timer()
self._results[key] += (end, id)
def _finish(self, dsk, state, failed):
results = {k: v for k, v in self._results.items() if len(v) == 5}
self.results += list(starmap(TaskData, results.values()))
self._results.clear()
def _plot(self, **kwargs):
from .profile_visualize import plot_tasks
return plot_tasks(self.results, self._dsk, **kwargs)
def visualize(self, **kwargs):
"""Visualize the profiling run in a bokeh plot.
See also
--------
dask.diagnostics.profile_visualize.visualize
"""
from .profile_visualize import visualize
return visualize(self, **kwargs)
def clear(self):
"""Clear out old results from profiler"""
self._results.clear()
del self.results[:]
self._dsk = {}
ResourceData = namedtuple("ResourceData", ("time", "mem", "cpu"))
class ResourceProfiler(Callback):
"""A profiler for resource use.
Records the following each timestep
1. Time in seconds since the epoch
2. Memory usage in MB
3. % CPU usage
Examples
--------
>>> from operator import add, mul
>>> from dask.threaded import get
>>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> with ResourceProfiler() as prof:
... get(dsk, 'z')
22
These results can be visualized in a bokeh plot using the ``visualize``
method. Note that this requires bokeh to be installed.
>>> prof.visualize() # doctest: +SKIP
You can activate the profiler globally
>>> prof.register()
If you use the profiler globally you will need to clear out old results
manually.
>>> prof.clear()
Note that when used as a context manager data will be collected throughout
the duration of the enclosed block. In contrast, when registered globally
data will only be collected while a dask scheduler is active.
"""
def __init__(self, dt=1):
self._dt = dt
self._entered = False
self._tracker = None
self.results = []
def _is_running(self):
return self._tracker is not None and self._tracker.is_alive()
def _start_collect(self):
if not self._is_running():
self._tracker = _Tracker(self._dt)
self._tracker.start()
self._tracker.parent_conn.send("collect")
def _stop_collect(self):
if self._is_running():
self._tracker.parent_conn.send("send_data")
self.results.extend(starmap(ResourceData, self._tracker.parent_conn.recv()))
def __enter__(self):
self._entered = True
self.clear()
self._start_collect()
return super().__enter__()
def __exit__(self, *args):
self._entered = False
self._stop_collect()
self.close()
super().__exit__(*args)
def _start(self, dsk):
self._start_collect()
def _finish(self, dsk, state, failed):
if not self._entered:
self._stop_collect()
def close(self):
"""Shutdown the resource tracker process"""
if self._is_running():
self._tracker.shutdown()
self._tracker = None
__del__ = close
def clear(self):
self.results = []
def _plot(self, **kwargs):
from .profile_visualize import plot_resources
return plot_resources(self.results, **kwargs)
def visualize(self, **kwargs):
"""Visualize the profiling run in a bokeh plot.
See also
--------
dask.diagnostics.profile_visualize.visualize
"""
from .profile_visualize import visualize
return visualize(self, **kwargs)
class _Tracker(Process):
"""Background process for tracking resource usage"""
def __init__(self, dt=1):
super().__init__()
self.daemon = True
self.dt = dt
self.parent_pid = current_process().pid
self.parent_conn, self.child_conn = Pipe()
def shutdown(self):
if not self.parent_conn.closed:
self.parent_conn.send("shutdown")
self.parent_conn.close()
self.join()
def _update_pids(self, pid):
return [self.parent] + [
p for p in self.parent.children() if p.pid != pid and p.status() != "zombie"
]
def run(self):
psutil = import_required(
"psutil", "Tracking resource usage requires `psutil` to be installed"
)
self.parent = psutil.Process(self.parent_pid)
pid = current_process()
data = []
while True:
try:
msg = self.child_conn.recv()
except KeyboardInterrupt:
continue
if msg == "shutdown":
break
elif msg == "collect":
ps = self._update_pids(pid)
while not data or not self.child_conn.poll():
tic = default_timer()
mem = cpu = 0
for p in ps:
try:
mem2 = p.memory_info().rss
cpu2 = p.cpu_percent()
except Exception: # could be a few different exceptions
pass
else:
# Only increment if both were successful
mem += mem2
cpu += cpu2
data.append((tic, mem / 1e6, cpu))
sleep(self.dt)
elif msg == "send_data":
self.child_conn.send(data)
data = []
self.child_conn.close()
CacheData = namedtuple( | "CacheData", ("key", "task", "metric", "cache_time", "free_time")
)
class CacheProfiler(Callback):
"""A profiler for dask execution at the scheduler cache level.
Records the following information for each task:
1. Key
2. Task
3. Size metric
4. Cache entry time in seconds since the epoch
5. Cache exit time in seconds since the epoch
Examples
--------
>>> from operator import add, mul
>>> from dask.threaded import get
>>> from dask.diagnostics import CacheProfiler
>>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> with CacheProfiler() as prof:
... get(dsk, 'z')
22
>>> prof.results # doctest: +SKIP
[CacheData(key='y', task=(add, 'x', 10), metric=1, cache_time=..., free_time=...),
CacheData(key='z', task=(mul, 'y', 2), metric=1, cache_time=..., free_time=...)]
The default is to count each task (``metric`` is 1 for all tasks). Other
functions may used as a metric instead through the ``metric`` keyword. For
example, the ``nbytes`` function found in ``cachey`` can be used to measure
the number of bytes in the cache.
>>> from cachey import nbytes
>>> with CacheProfiler(metric=nbytes) as prof:
... get(dsk, 'z')
22
The profiling results can be visualized in a bokeh plot using the
``visualize`` method. Note that this requires bokeh to be installed.
>>> prof.visualize() # doctest: +SKIP
You can activate the profiler globally
>>> prof.register()
If you use the profiler globally you will need to clear out old results
manually.
>>> prof.clear()
"""
def __init__(self, metric=None, metric_name=None):
self.clear()
self._metric = metric if metric else lambda value: 1
if metric_name:
self._metric_name = metric_name
elif metric:
self._metric_name = metric.__name__
else:
self._metric_name = "count"
def __enter__(self):
self.clear()
return super().__enter__()
def _start(self, dsk):
self._dsk.update(dsk)
if not self._start_time:
self._start_time = default_timer()
def _posttask(self, key, value, dsk, state, id):
t = default_timer()
self._cache[key] = (self._metric(value), t)
for k in state["released"] & self._cache.keys():
metric, start = self._cache.pop(k)
self.results.append(CacheData(k, dsk[k], metric, start, t))
def _finish(self, dsk, state, failed):
t = default_timer()
for k, (metric, start) in self._cache.items():
self.results.append(CacheData(k, dsk[k], metric, start, t))
self._cache.clear()
def _plot(self, **kwargs):
from .profile_visualize import plot_cache
return plot_cache(
self.results, self._dsk, self._start_time, self._metric_name, **kwargs
)
def visualize(self, **kwargs):
"""Visualize the profiling run in a bokeh plot.
See also
--------
dask.diagnostics.profile_visualize.visualize
"""
from .profile_visualize import visualize
return visualize(self, **kwargs)
def clear(self):
"""Clear out old results from profiler"""
self.results = []
self._cache = {}
self._dsk = {}
self._start_time = None | |
d3_helpers.js | /*global d3 */
var formatDate = d3.time.format.utc("%B %d, %Y"),
formatMonthYear = d3.time.format.utc("%B %Y"),
formatYear = d3.time.format.utc("%Y"),
formatTime = d3.time.format.utc("%H:%M UTC"),
formatWeek = d3.time.format.utc("%U"),
formatHour = d3.time.format.utc("%H"),
formatNumber = d3.format(",d"),
formatFixed = d3.format(",.0f"),
formatPercent = d3.format(",.0%");
function numberWithDelimiter(number) {
if(number !== 0) {
return formatFixed(number);
} else {
return null;
}
}
// Format file size into human-readable format
function numberToHumanSize(bytes) {
var thresh = 1000;
if(bytes < thresh) { return bytes + ' B'; }
var units = ['KB','MB','GB','TB','PB'];
var u = -1;
do { bytes /= thresh; ++u; } while(bytes >= thresh);
return bytes.toFixed(1) + ' ' + units[u];
}
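// Illustrative examples (editor's addition): numberToHumanSize(950) === "950 B",
// numberToHumanSize(1234567) === "1.2 MB" (decimal, 1000-based units).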
// construct date object from date parts
function datePartsToDate(date_parts) {
var len = date_parts.length;
// not in expected format
if (len === 0 || len > 3) { return null; }
// turn numbers to strings and pad with 0
for (var i = 0; i < len; ++i) {
if (date_parts[i] < 10) {
date_parts[i] = "0" + date_parts[i];
} else {
date_parts[i] = "" + date_parts[i];
}
}
// convert to date, workaround for different time zones
var timestamp = Date.parse(date_parts.join('-') + 'T12:00');
return new Date(timestamp);
}
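// Illustrative example (editor's addition): datePartsToDate([2014, 7, 9]) zero-pads the
// parts and parses "2014-07-09T12:00", using noon to sidestep timezone-related date shifts.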
// format date
function formattedDate(date, len) {
switch (len) {
case 1:
return formatYear(date);
case 2:
return formatMonthYear(date);
case 3:
return formatDate(date);
}
}
// pagination
function paginate(json) {
if ((json.meta.page !== "") && json.meta.total_pages > 1) {
    var prev = (json.meta.page > 1) ? "«" : null;
    var next = (json.meta.page < json.meta.total_pages) ? "»" : null;
d3.select("#content").append("div")
.attr("id", "paginator")
.attr("class", "text-center");
$('#paginator').bootpag({
total: json.meta.total_pages,
page: json.meta.page,
maxVisible: 10,
href: json.href,
leaps: false,
prev: prev,
next: next
});
}
}
// link to individual work
function pathForWork(id) {
if (typeof id === "undefined") { return ""; };
if (id.substring(0, 15) === "http://doi.org/" ||
id.substring(0, 35) === "http://www.ncbi.nlm.nih.gov/pubmed/" ||
id.substring(0, 41) === "http://www.ncbi.nlm.nih.gov/pmc/works/PMC" ||
id.substring(0, 21) === "http://arxiv.org/abs/" ||
id.substring(0, 15) === "http://n2t.net/") {
return id.replace(/^https?:\/\//,'');
} else {
return id;
}
} | name = source_id;
} else if (typeof sort !== "undefined" && sort !== "") {
name = sort;
}
if (name !== "") {
var source = sources.filter(function(d) { return d.id === name; })[0];
}
if (typeof source !== "undefined" && source !== "") {
var a = [source.title + ": " + formatFixed(work.events[name])];
} else {
var a = [];
}
var b = [],
signposts = signpostsFromWork(work);
if (signposts.viewed > 0) { b.push("Viewed: " + formatFixed(signposts.viewed)); }
if (signposts.cited > 0) { b.push("Cited: " + formatFixed(signposts.cited)); }
if (signposts.saved > 0) { b.push("Saved: " + formatFixed(signposts.saved)); }
if (signposts.discussed > 0) { b.push("Discussed: " + formatFixed(signposts.discussed)); }
if (b.length > 0) {
    a.push(b.join(" • "));
return a.join(" | ");
} else if (a.length > 0) {
return a;
} else {
return "";
}
}
function signpostsFromWork(work) {
var viewed = (work.events.counter || 0) + (work.events.pmc || 0);
var cited = work.events.crossref;
var saved = (work.events.citeulike || 0) + (work.events.mendeley || 0);
var discussed = (work.events.facebook || 0) + (work.events.twitter || 0) + (work.events.twitter_search || 0);
return { "viewed": viewed, "cited": cited, "saved": saved, "discussed": discussed };
}
function relationToString(work, sources, relation_types) {
var source = sources.filter(function(d) { return d.id === work.source_id; })[0];
if (typeof source == "undefined" || source === "") { return []; }
var relation_type = relation_types.filter(function(d) { return d.id === work.relation_type_id; })[0];
if (typeof relation_type == "undefined" || relation_type === "") { return []; }
return [relation_type.inverse_title, " via " + source.title];
}
// construct author object from author parts
function formattedAuthor(author) {
author = author.map(function(d) { return d.given + " " + d.family; });
switch (author.length) {
case 0:
case 1:
case 2:
return author.join(" & ");
case 3:
case 4:
return author.slice(0,-1).join(", ") + " & " + author[author.length - 1];
default:
return author.slice(0,3).join(", ") + ", <em>et al</em>";
}
}
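// Illustrative examples (editor's addition): two authors -> "Ada Lovelace & Alan Turing";
// five or more -> the first three joined with ", " followed by ", <em>et al</em>".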
// format event type
function formattedType(type) {
var types = { "article-journal": "Journal article",
"article-newspaper": "News",
"post": "Blog post",
"webpage": "Web page",
"broadcast": "Podcast/Video",
"personal_communication": "Personal communication" };
return types[type] || "Other";
} |
function signpostsToString(work, sources, source_id, sort) {
var name = "";
if (typeof source_id !== "undefined" && source_id !== "") { |
models.py | import sqlalchemy as sa
from enum import Enum
from sqlalchemy.orm import relationship, backref
from typing import List
from datetime import date
from dataclasses import dataclass, field
from marshmallow import validate
from datahub.db import ModelBase, Session
from datahub.measurements import MeasurementQuery
from datahub.common import SummaryResolution, SummaryGroup, DateRange
from datahub.meteringpoints import MeteringPoint
class DisclosureState(Enum):
PENDING = 'PENDING'
PROCESSING = 'PROCESSING'
AVAILABLE = 'AVAILABLE'
class Disclosure(ModelBase):
"""
banner_status = postgresql.ENUM('PENDING', 'PROCESSING', 'AVAILABLE', name='disclosurestate')
banner_status.create(op.get_bind())
op.add_column('disclosure', sa.Column('state', sa.Enum('PENDING', 'PROCESSING', 'AVAILABLE', name='disclosurestate'), nullable=False))
"""
__tablename__ = 'disclosure'
__table_args__ = (
sa.UniqueConstraint('public_id'),
)
id = sa.Column(sa.Integer(), primary_key=True, index=True)
public_id = sa.Column(sa.String(), index=True)
created = sa.Column(sa.DateTime(timezone=True), server_default=sa.func.now())
state = sa.Column(sa.Enum(DisclosureState), nullable=False)
sub = sa.Column(sa.String(), index=True, nullable=False)
begin = sa.Column(sa.Date(), nullable=False)
end = sa.Column(sa.Date(), nullable=False)
name = sa.Column(sa.String(), nullable=False)
description = sa.Column(sa.String())
max_resolution = sa.Column(sa.Enum(SummaryResolution), nullable=False)
publicize_meteringpoints = sa.Column(sa.Boolean(), nullable=False)
publicize_gsrn = sa.Column(sa.Boolean(), nullable=False)
publicize_physical_address = sa.Column(sa.Boolean(), nullable=False)
@property
def date_range(self):
"""
:rtype: DateRange
"""
return DateRange(begin=self.begin, end=self.end)
def add_meteringpoint(self, meteringpoint):
"""
:param MeteringPoint meteringpoint:
"""
self.meteringpoints.append(
DisclosureMeteringPoint(gsrn=meteringpoint.gsrn))
def get_gsrn(self):
"""
:rtype list[str]:
"""
return [mp.gsrn for mp in self.meteringpoints]
def get_measurements(self):
"""
:rtype: MeasurementQuery
"""
return MeasurementQuery(Session.object_session(self)) \
.is_active() \
.has_any_gsrn(self.get_gsrn()) \
.begins_within(self.date_range.to_datetime_range()) \
.is_published()
class DisclosureMeteringPoint(ModelBase):
"""
TODO
"""
__tablename__ = 'disclosure_meteringpoint'
__table_args__ = (
sa.UniqueConstraint('disclosure_id', 'gsrn'),
)
id = sa.Column(sa.Integer(), primary_key=True, index=True)
disclosure_id = sa.Column(sa.Integer(), sa.ForeignKey('disclosure.id', ondelete='CASCADE'), index=True, nullable=False)
disclosure = relationship('Disclosure', foreign_keys=[disclosure_id], backref=backref('meteringpoints', passive_deletes=True))
gsrn = sa.Column(sa.String(), sa.ForeignKey('meteringpoint.gsrn'), nullable=False)
meteringpoint = relationship('MeteringPoint', foreign_keys=[gsrn])
class DisclosureSettlement(ModelBase):
"""
TODO
"""
__tablename__ = 'disclosure_settlement'
__table_args__ = (
sa.UniqueConstraint('disclosure_id', 'measurement_id'),
sa.UniqueConstraint('disclosure_id', 'address'),
)
id = sa.Column(sa.Integer(), primary_key=True, index=True)
disclosure_id = sa.Column(sa.Integer(), sa.ForeignKey('disclosure.id', ondelete='CASCADE'), index=True, nullable=False)
disclosure = relationship('Disclosure', foreign_keys=[disclosure_id], backref=backref('settlements', passive_deletes=True))
measurement_id = sa.Column(sa.Integer(), sa.ForeignKey('measurement.id'), index=True)
measurement = relationship('Measurement', foreign_keys=[measurement_id])
address = sa.Column(sa.String(), nullable=False)
class DisclosureRetiredGgo(ModelBase):
"""
TODO
"""
__tablename__ = 'disclosure_ggo'
__table_args__ = (
sa.UniqueConstraint('settlement_id', 'address'),
)
id = sa.Column(sa.Integer(), primary_key=True, index=True)
settlement_id = sa.Column(sa.Integer(), sa.ForeignKey('disclosure_settlement.id', ondelete='CASCADE'), index=True, nullable=False)
settlement = relationship('DisclosureSettlement', foreign_keys=[settlement_id], backref=backref('ggos', passive_deletes=True))
address = sa.Column(sa.String(), nullable=False)
amount = sa.Column(sa.Integer(), nullable=False)
begin = sa.Column(sa.DateTime(timezone=True), nullable=False)
end = sa.Column(sa.DateTime(timezone=True), nullable=False)
sector = sa.Column(sa.String(), nullable=False)
technology_code = sa.Column(sa.String(), nullable=False)
fuel_code = sa.Column(sa.String(), nullable=False)
# -- Common ------------------------------------------------------------------
@dataclass
class MappedDisclosure:
public_id: str = field(metadata=dict(data_key='id'))
name: str
description: str
begin: date
end: date
publicize_meteringpoints: bool = field(metadata=dict(data_key='publicizeMeteringpoints'))
publicize_gsrn: bool = field(metadata=dict(data_key='publicizeGsrn'))
publicize_physical_address: bool = field(metadata=dict(data_key='publicizePhysicalAddress'))
# -- GetDisclosure request and response --------------------------------------
@dataclass
class DisclosureDataSeries:
gsrn: str = field(default=None)
address: str = field(default=None)
measurements: List[int] = field(default_factory=list)
ggos: List[SummaryGroup] = field(default_factory=list)
@dataclass
class GetDisclosureRequest:
id: str
# Offset from UTC in hours
utc_offset: int = field(metadata=dict(required=False, missing=0, data_key='utcOffset'))
resolution: SummaryResolution = field(default=None)
date_range: DateRange = field(default=None, metadata=dict(data_key='dateRange'))
@dataclass
class GetDisclosureResponse:
|
# -- GetDisclosureList request and response ----------------------------------
@dataclass
class GetDisclosureListResponse:
success: bool
disclosures: List[MappedDisclosure] = field(default_factory=list)
# -- CreateDisclosure request and response -----------------------------------
@dataclass
class CreateDisclosureRequest:
name: str
description: str
begin: date
end: date
max_resolution: SummaryResolution = field(metadata=dict(data_key='maxResolution'))
publicize_meteringpoints: bool = field(metadata=dict(data_key='publicizeMeteringpoints'))
publicize_gsrn: bool = field(metadata=dict(data_key='publicizeGsrn'))
publicize_physical_address: bool = field(metadata=dict(data_key='publicizePhysicalAddress'))
gsrn: List[str] = field(metadata=dict(validate=validate.Length(min=1)))
@dataclass
class CreateDisclosureResponse:
success: bool
id: str
# -- DeleteDisclosure request and response -----------------------------------
@dataclass
class DeleteDisclosureRequest:
id: str
@dataclass
class DeleteDisclosureResponse:
success: bool
message: str = field(default=None)
| success: bool
description: str = field(default=None)
begin: date = field(default=None)
end: date = field(default=None)
message: str = field(default=None)
state: DisclosureState = field(default=None, metadata=dict(by_value=True))
labels: List[str] = field(default_factory=list)
data: List[DisclosureDataSeries] = field(default_factory=list) |
144.py | from chill import * | source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/144.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,128,2)
tile(1,4,32,4)
tile(2,2,128,2)
tile(2,4,32,4)
tile(3,2,128,2)
tile(3,4,32,4) | |
streams.py | from __future__ import annotations
from typing import Any
from coredis._utils import EncodingInsensitiveDict
from coredis.response._callbacks import ResponseCallback
from coredis.response._utils import flat_pairs_to_dict, flat_pairs_to_ordered_dict
from coredis.response.types import (
StreamEntry,
StreamInfo,
StreamPending,
StreamPendingExt,
)
from coredis.typing import (
AnyStr,
Dict,
Optional,
OrderedDict,
ResponseType,
StringT,
Tuple,
Union,
ValueT,
)
class StreamRangeCallback(
ResponseCallback[ResponseType, ResponseType, Tuple[StreamEntry, ...]]
):
def transform(
self, response: ResponseType, **options: Optional[ValueT]
) -> Tuple[StreamEntry, ...]:
return tuple(
StreamEntry(r[0], flat_pairs_to_ordered_dict(r[1])) for r in response
)
class ClaimCallback(
ResponseCallback[
ResponseType, ResponseType, Union[Tuple[AnyStr, ...], Tuple[StreamEntry, ...]]
]
):
def transform(
self, response: ResponseType, **options: Optional[ValueT]
) -> Union[Tuple[AnyStr, ...], Tuple[StreamEntry, ...]]:
if options.get("justid") is not None:
return tuple(response)
else:
return StreamRangeCallback()(response)
class AutoClaimCallback(
ResponseCallback[
ResponseType,
ResponseType,
Union[
Tuple[AnyStr, Tuple[AnyStr, ...]],
Tuple[AnyStr, Tuple[StreamEntry, ...], Tuple[AnyStr, ...]],
],
]
):
def transform(
self, response: ResponseType, **options: Optional[ValueT]
) -> Union[
Tuple[AnyStr, Tuple[AnyStr, ...]],
Tuple[AnyStr, Tuple[StreamEntry, ...], Tuple[AnyStr, ...]],
]:
if options.get("justid") is not None:
return response[0], tuple(response[1])
else:
return (
response[0],
StreamRangeCallback()(response[1]),
tuple(response[2]) if len(response) > 2 else (),
)
class MultiStreamRangeCallback(
ResponseCallback[
ResponseType, ResponseType, Optional[Dict[AnyStr, Tuple[StreamEntry, ...]]]
]
):
def transform_3(
self, response: ResponseType, **options: Optional[ValueT]
) -> Optional[Dict[AnyStr, Tuple[StreamEntry, ...]]]:
if response:
mapping: Dict[AnyStr, Tuple[StreamEntry, ...]] = {}
for stream_id, entries in response.items():
mapping[stream_id] = tuple(
StreamEntry(r[0], flat_pairs_to_ordered_dict(r[1])) for r in entries
)
return mapping
return None
def transform(
self, response: ResponseType, **options: Optional[ValueT]
) -> Optional[Dict[AnyStr, Tuple[StreamEntry, ...]]]:
if response:
mapping: Dict[AnyStr, Tuple[StreamEntry, ...]] = {}
for stream_id, entries in response:
mapping[stream_id] = tuple(
StreamEntry(r[0], flat_pairs_to_ordered_dict(r[1])) for r in entries
)
return mapping
return None
class PendingCallback(
ResponseCallback[
ResponseType, ResponseType, Union[StreamPending, Tuple[StreamPendingExt, ...]]
]
):
def transform(
self, response: ResponseType, **options: Optional[ValueT]
) -> Union[StreamPending, Tuple[StreamPendingExt, ...]]:
if not options.get("count"):
return StreamPending(
response[0],
response[1],
response[2],
OrderedDict((r[0], int(r[1])) for r in response[3] or []),
)
else:
return tuple(
StreamPendingExt(sub[0], sub[1], sub[2], sub[3]) for sub in response
)
class XInfoCallback(
ResponseCallback[ResponseType, ResponseType, Tuple[Dict[AnyStr, AnyStr], ...]]
):
def transform(
self, response: ResponseType, **options: Optional[ValueT]
) -> Tuple[Dict[AnyStr, AnyStr], ...]:
return tuple(flat_pairs_to_dict(row) for row in response)
class StreamInfoCallback(ResponseCallback[ResponseType, ResponseType, StreamInfo]):
def transform(
self, response: ResponseType, **options: Optional[ValueT]
) -> StreamInfo:
res: Dict[StringT, Any] = EncodingInsensitiveDict(flat_pairs_to_dict(response))
if not options.get("full"):
k1 = "first-entry"
kn = "last-entry"
e1: Optional[StreamEntry] = None
en: Optional[StreamEntry] = None
if len(res.get(k1, [])) > 0:
v = res.get(k1)
e1 = StreamEntry(v[0], flat_pairs_to_ordered_dict(v[1]))
res.pop(k1)
if len(res.get(kn, [])) > 0:
v = res.get(kn)
en = StreamEntry(v[0], flat_pairs_to_ordered_dict(v[1]))
res.pop(kn)
res.update({"first-entry": e1, "last-entry": en})
else:
groups = res.get("groups")
if groups:
res.update({"groups": flat_pairs_to_dict(groups)})
res.update(
{
"entries": tuple(
StreamEntry(k[0], flat_pairs_to_ordered_dict(k[1]))
for k in res.get("entries", [])
)
}
)
stream_info: StreamInfo = {
"first-entry": res.get("first-entry"), | "last-entry": res.get("last-entry"),
"length": res["length"],
"radix-tree-keys": res["radix-tree-keys"],
"radix-tree-nodes": res["radix-tree-nodes"],
"groups": res["groups"],
"last-generated-id": res["last-generated-id"],
"max-deleted-entry-id": str(res.get("max-deleted-entry-id")),
"entries-added": int(res.get("entries-added", 0)),
"recorded-first-entry-id": str(res.get("recorded-first-entry-id")),
"entries-read": int(res.get("entries-read", 0)),
"entries": res.get("entries"),
}
return stream_info | |
ip.go | package ip
import (
"net"
"strings"
)
// InternalIP gets the internal (non-loopback) IPv4 address.
func | () string {
inters, err := net.Interfaces()
if err != nil {
return ""
}
for _, inter := range inters {
if !strings.HasPrefix(inter.Name, "lo") {
addrs, err := inter.Addrs()
if err != nil {
continue
}
for _, addr := range addrs {
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
}
}
}
}
}
return ""
}
| InternalIP |
HeannotationOUTdetailsSolveRefEAnnotationEStringToStringMapEntryEAnnotationEStringToStringMapEntry.py |
from core.himesis import Himesis
class HeannotationOUTdetailsSolveRefEAnnotationEStringToStringMapEntryEAnnotationEStringToStringMapEntry(Himesis):
def | (self):
"""
Creates the himesis graph representing the AToM3 model HeannotationOUTdetailsSolveRefEAnnotationEStringToStringMapEntryEAnnotationEStringToStringMapEntry.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HeannotationOUTdetailsSolveRefEAnnotationEStringToStringMapEntryEAnnotationEStringToStringMapEntry, self).__init__(name='HeannotationOUTdetailsSolveRefEAnnotationEStringToStringMapEntryEAnnotationEStringToStringMapEntry', num_nodes=27, edges=[])
# Add the edges
self.add_edges([[0, 6], [6, 5], [0, 8], [8, 7], [1, 10], [10, 9], [1, 12], [12, 11], [5, 3], [3, 7], [9, 4], [4, 11], [9, 13], [13, 5], [11, 14], [14, 7], [9, 15], [15, 16], [17, 18], [18, 16], [17, 19], [19, 20], [11, 21], [21, 22], [23, 24], [24, 22], [23, 25], [25, 26], [0, 2], [2, 1]])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """eannotationOUTdetailsSolveRefEAnnotationEStringToStringMapEntryEAnnotationEStringToStringMapEntry"""
self["GUID__"] = 6128048148929558276
# Set the node attributes
self.vs[0]["mm__"] = """MatchModel"""
self.vs[0]["GUID__"] = 3241548613573966355
self.vs[1]["mm__"] = """ApplyModel"""
self.vs[1]["GUID__"] = 7588304692645087636
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["GUID__"] = 2842412945556472011
self.vs[3]["associationType"] = """details"""
self.vs[3]["mm__"] = """directLink_S"""
self.vs[3]["GUID__"] = 4504396410669904024
self.vs[4]["associationType"] = """details"""
self.vs[4]["mm__"] = """directLink_T"""
self.vs[4]["GUID__"] = 278924163179204411
self.vs[5]["name"] = """"""
self.vs[5]["classtype"] = """EAnnotation"""
self.vs[5]["mm__"] = """EAnnotation"""
self.vs[5]["cardinality"] = """+"""
self.vs[5]["GUID__"] = 8921102541237060280
self.vs[6]["mm__"] = """match_contains"""
self.vs[6]["GUID__"] = 5548880848816893316
self.vs[7]["name"] = """"""
self.vs[7]["classtype"] = """EStringToStringMapEntry"""
self.vs[7]["mm__"] = """EStringToStringMapEntry"""
self.vs[7]["cardinality"] = """+"""
self.vs[7]["GUID__"] = 3620815062825569950
self.vs[8]["mm__"] = """match_contains"""
self.vs[8]["GUID__"] = 4633829444907935660
self.vs[9]["name"] = """"""
self.vs[9]["classtype"] = """EAnnotation"""
self.vs[9]["mm__"] = """EAnnotation"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = 5286614269433226811
self.vs[10]["mm__"] = """apply_contains"""
self.vs[10]["GUID__"] = 4260923047893991880
self.vs[11]["name"] = """"""
self.vs[11]["classtype"] = """EStringToStringMapEntry"""
self.vs[11]["mm__"] = """EStringToStringMapEntry"""
self.vs[11]["cardinality"] = """1"""
self.vs[11]["GUID__"] = 9089520517795853435
self.vs[12]["mm__"] = """apply_contains"""
self.vs[12]["GUID__"] = 7992456823388861929
self.vs[13]["mm__"] = """backward_link"""
self.vs[13]["type"] = """ruleDef"""
self.vs[13]["GUID__"] = 5846802733544168611
self.vs[14]["mm__"] = """backward_link"""
self.vs[14]["type"] = """ruleDef"""
self.vs[14]["GUID__"] = 8076248770867941562
self.vs[15]["mm__"] = """hasAttribute_T"""
self.vs[15]["GUID__"] = 5555931425631113564
self.vs[16]["name"] = """ApplyAttribute"""
self.vs[16]["Type"] = """'String'"""
self.vs[16]["mm__"] = """Attribute"""
self.vs[16]["GUID__"] = 458427999981145130
self.vs[17]["name"] = """eq_"""
self.vs[17]["mm__"] = """Equation"""
self.vs[17]["GUID__"] = 7239587957748939154
self.vs[18]["mm__"] = """leftExpr"""
self.vs[18]["GUID__"] = 4582391555812468568
self.vs[19]["mm__"] = """rightExpr"""
self.vs[19]["GUID__"] = 3632186366108425725
self.vs[20]["name"] = """solveRef"""
self.vs[20]["Type"] = """'String'"""
self.vs[20]["mm__"] = """Constant"""
self.vs[20]["GUID__"] = 2856418966115592764
self.vs[21]["mm__"] = """hasAttribute_T"""
self.vs[21]["GUID__"] = 6569304790853800773
self.vs[22]["name"] = """ApplyAttribute"""
self.vs[22]["Type"] = """'String'"""
self.vs[22]["mm__"] = """Attribute"""
self.vs[22]["GUID__"] = 5571281642373343531
self.vs[23]["name"] = """eq_"""
self.vs[23]["mm__"] = """Equation"""
self.vs[23]["GUID__"] = 7415746926020923022
self.vs[24]["mm__"] = """leftExpr"""
self.vs[24]["GUID__"] = 8365403988732939094
self.vs[25]["mm__"] = """rightExpr"""
self.vs[25]["GUID__"] = 5175019940122105361
self.vs[26]["name"] = """solveRef"""
self.vs[26]["Type"] = """'String'"""
self.vs[26]["mm__"] = """Constant"""
self.vs[26]["GUID__"] = 2895318596396602101
| __init__ |
108.js | webpackJsonp([108],{
/***/ 2016:
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
Object.defineProperty(__webpack_exports__, "__esModule", { value: true });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "AddonModFeedbackNonRespondentsPageModule", function() { return AddonModFeedbackNonRespondentsPageModule; });
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__angular_core__ = __webpack_require__(0);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1_ionic_angular__ = __webpack_require__(3);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__ngx_translate_core__ = __webpack_require__(1);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__directives_directives_module__ = __webpack_require__(14);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__components_components_module__ = __webpack_require__(13);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__components_components_module__ = __webpack_require__(997);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_6__nonrespondents__ = __webpack_require__(2172);
// (C) Copyright 2015 Moodle Pty Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var AddonModFeedbackNonRespondentsPageModule = /** @class */ (function () {
function AddonModFeedbackNonRespondentsPageModule() {
}
AddonModFeedbackNonRespondentsPageModule = __decorate([
Object(__WEBPACK_IMPORTED_MODULE_0__angular_core__["I" /* NgModule */])({
declarations: [
__WEBPACK_IMPORTED_MODULE_6__nonrespondents__["a" /* AddonModFeedbackNonRespondentsPage */],
],
imports: [
__WEBPACK_IMPORTED_MODULE_3__directives_directives_module__["a" /* CoreDirectivesModule */],
__WEBPACK_IMPORTED_MODULE_4__components_components_module__["a" /* CoreComponentsModule */],
__WEBPACK_IMPORTED_MODULE_5__components_components_module__["a" /* AddonModFeedbackComponentsModule */],
__WEBPACK_IMPORTED_MODULE_1_ionic_angular__["l" /* IonicPageModule */].forChild(__WEBPACK_IMPORTED_MODULE_6__nonrespondents__["a" /* AddonModFeedbackNonRespondentsPage */]),
__WEBPACK_IMPORTED_MODULE_2__ngx_translate_core__["b" /* TranslateModule */].forChild()
],
})
], AddonModFeedbackNonRespondentsPageModule);
return AddonModFeedbackNonRespondentsPageModule;
}());
//# sourceMappingURL=nonrespondents.module.js.map
/***/ }),
/***/ 2172:
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return AddonModFeedbackNonRespondentsPage; });
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__angular_core__ = __webpack_require__(0);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1_ionic_angular__ = __webpack_require__(3);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__providers_feedback__ = __webpack_require__(90);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__providers_helper__ = __webpack_require__(193);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__providers_groups__ = __webpack_require__(54);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__providers_utils_dom__ = __webpack_require__(7);
// (C) Copyright 2015 Moodle Pty Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
/**
* Page that displays feedback non respondents.
*/
var AddonModFeedbackNonRespondentsPage = /** @class */ (function () {
function AddonModFeedbackNonRespondentsPage(navParams, feedbackProvider, groupsProvider, domUtils, feedbackHelper, navCtrl) {
this.feedbackProvider = feedbackProvider;
this.groupsProvider = groupsProvider;
this.domUtils = domUtils;
this.feedbackHelper = feedbackHelper;
this.navCtrl = navCtrl;
this.page = 0;
this.groupInfo = {
groups: [],
separateGroups: false,
visibleGroups: false
};
this.users = [];
this.total = 0;
this.canLoadMore = false;
this.feedbackLoaded = false;
this.loadingMore = false;
var module = navParams.get('module');
this.moduleId = module.id;
this.feedbackId = module.instance;
this.courseId = navParams.get('courseId');
this.selectedGroup = navParams.get('group') || 0;
}
/**
* View loaded.
*/
AddonModFeedbackNonRespondentsPage.prototype.ionViewDidLoad = function () {
this.fetchData();
};
/**
* Fetch all the data required for the view.
*
* @param refresh Empty events array first.
* @return Promise resolved when done.
*/
AddonModFeedbackNonRespondentsPage.prototype.fetchData = function (refresh) {
var _this = this;
if (refresh === void 0) { refresh = false; }
this.page = 0;
this.total = 0;
this.users = [];
return this.groupsProvider.getActivityGroupInfo(this.moduleId).then(function (groupInfo) {
_this.groupInfo = groupInfo;
_this.selectedGroup = _this.groupsProvider.validateGroupId(_this.selectedGroup, groupInfo);
return _this.loadGroupUsers(_this.selectedGroup);
}).catch(function (message) {
_this.domUtils.showErrorModalDefault(message, 'core.course.errorgetmodule', true);
if (!refresh) {
// Some call failed on first fetch, go back.
_this.navCtrl.pop(); | });
};
/**
* Load Group responses.
*
* @param groupId If defined it will change group if not, it will load more users for the same group.
* @return Resolved with the attempts loaded.
*/
AddonModFeedbackNonRespondentsPage.prototype.loadGroupUsers = function (groupId) {
var _this = this;
if (typeof groupId == 'undefined') {
this.page++;
this.loadingMore = true;
}
else {
this.selectedGroup = groupId;
this.page = 0;
this.total = 0;
this.users = [];
this.feedbackLoaded = false;
}
return this.feedbackHelper.getNonRespondents(this.feedbackId, this.selectedGroup, this.page).then(function (response) {
_this.total = response.total;
if (_this.users.length < response.total) {
_this.users = _this.users.concat(response.users);
}
_this.canLoadMore = _this.users.length < response.total;
return response;
}).finally(function () {
_this.loadingMore = false;
_this.feedbackLoaded = true;
});
};
/**
* Change selected group or load more users.
*
* @param groupId Group ID selected. If not defined, it will load more users.
*/
AddonModFeedbackNonRespondentsPage.prototype.loadAttempts = function (groupId) {
var _this = this;
this.loadGroupUsers(groupId).catch(function (message) {
_this.domUtils.showErrorModalDefault(message, 'core.course.errorgetmodule', true);
});
};
/**
* Refresh the attempts.
*
* @param refresher Refresher.
*/
AddonModFeedbackNonRespondentsPage.prototype.refreshFeedback = function (refresher) {
var _this = this;
if (this.feedbackLoaded) {
var promises = [];
promises.push(this.feedbackProvider.invalidateNonRespondentsData(this.feedbackId));
promises.push(this.groupsProvider.invalidateActivityGroupInfo(this.moduleId));
Promise.all(promises).finally(function () {
return _this.fetchData(true);
}).finally(function () {
refresher.complete();
});
}
};
AddonModFeedbackNonRespondentsPage = __decorate([
Object(__WEBPACK_IMPORTED_MODULE_0__angular_core__["m" /* Component */])({
selector: 'page-addon-mod-feedback-nonrespondents',template:/*ion-inline-start:"C:\Windows\System32\moodlemobiledirectory\src\addon\mod\feedback\pages\nonrespondents\nonrespondents.html"*/'<ion-header>\n\n <ion-navbar core-back-button>\n\n <ion-title>{{ \'addon.mod_feedback.responses\' |translate }}</ion-title>\n\n </ion-navbar>\n\n</ion-header>\n\n<ion-content>\n\n <ion-refresher [enabled]="feedbackLoaded" (ionRefresh)="refreshFeedback($event)">\n\n <ion-refresher-content pullingText="{{ \'core.pulltorefresh\' | translate }}"></ion-refresher-content>\n\n </ion-refresher>\n\n <core-loading [hideUntil]="feedbackLoaded">\n\n <ion-list no-margin>\n\n <ion-item text-wrap *ngIf="groupInfo.separateGroups || groupInfo.visibleGroups">\n\n <ion-label id="addon-feedback-groupslabel" *ngIf="groupInfo.separateGroups">{{ \'core.groupsseparate\' | translate }}</ion-label>\n\n <ion-label id="addon-feedback-groupslabel" *ngIf="groupInfo.visibleGroups">{{ \'core.groupsvisible\' | translate }}</ion-label>\n\n <ion-select [(ngModel)]="selectedGroup" (ionChange)="loadAttempts(selectedGroup)" aria-labelledby="addon-feedback-groupslabel" interface="action-sheet">\n\n <ion-option *ngFor="let groupOpt of groupInfo.groups" [value]="groupOpt.id">{{groupOpt.name}}</ion-option>\n\n </ion-select>\n\n </ion-item>\n\n <ion-item-divider>\n\n {{ \'addon.mod_feedback.non_respondents_students\' | translate : {$a: total } }}\n\n </ion-item-divider>\n\n <ng-container *ngIf="total > 0">\n\n <ion-item *ngFor="let user of users" text-wrap>\n\n <ion-avatar core-user-avatar [user]="user" item-start></ion-avatar>\n\n <h2>{{ user.fullname }}</h2>\n\n <p>\n\n <ion-badge color="success" *ngIf="user.started">\n\n {{ \'addon.mod_feedback.started\' | translate}}\n\n </ion-badge>\n\n <ion-badge color="danger" *ngIf="!user.started">\n\n {{ \'addon.mod_feedback.not_started\' | translate}}\n\n </ion-badge>\n\n </p>\n\n </ion-item>\n\n </ng-container>\n\n <ion-item padding text-center *ngIf="canLoadMore">\n\n <!-- Button and spinner to show more attempts. -->\n\n <button ion-button block *ngIf="!loadingMore" (click)="loadAttempts()">{{ \'core.loadmore\' | translate }}</button>\n\n <ion-spinner *ngIf="loadingMore"></ion-spinner>\n\n </ion-item>\n\n </ion-list>\n\n </core-loading>\n\n</ion-content>\n\n'/*ion-inline-end:"C:\Windows\System32\moodlemobiledirectory\src\addon\mod\feedback\pages\nonrespondents\nonrespondents.html"*/,
}),
__metadata("design:paramtypes", [__WEBPACK_IMPORTED_MODULE_1_ionic_angular__["t" /* NavParams */], __WEBPACK_IMPORTED_MODULE_2__providers_feedback__["a" /* AddonModFeedbackProvider */],
__WEBPACK_IMPORTED_MODULE_4__providers_groups__["a" /* CoreGroupsProvider */], __WEBPACK_IMPORTED_MODULE_5__providers_utils_dom__["b" /* CoreDomUtilsProvider */],
__WEBPACK_IMPORTED_MODULE_3__providers_helper__["a" /* AddonModFeedbackHelperProvider */], __WEBPACK_IMPORTED_MODULE_1_ionic_angular__["s" /* NavController */]])
], AddonModFeedbackNonRespondentsPage);
return AddonModFeedbackNonRespondentsPage;
}());
//# sourceMappingURL=nonrespondents.js.map
/***/ })
});
//# sourceMappingURL=108.js.map | }
return Promise.reject(null); |
0022_auto_20150222_0024.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class | (migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0021_card_video_story'),
]
operations = [
migrations.AddField(
model_name='userpreferences',
name='following',
field=models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterField(
model_name='account',
name='owner',
field=models.ForeignKey(related_name='accounts_set', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterField(
model_name='ownedcard',
name='card',
field=models.ForeignKey(related_name='ownedcards', to='api.Card'),
preserve_default=True,
),
migrations.AlterField(
model_name='ownedcard',
name='owner_account',
field=models.ForeignKey(related_name='ownedcards', to='api.Account'),
preserve_default=True,
),
]
| Migration |
test_rgb_hsv.py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RgbToHsv and HsvToRgb op in DE
"""
import colorsys
import numpy as np
from numpy.testing import assert_allclose
import mindspore.dataset as ds
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.py_transforms_util as util
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def | (shape):
# Only generate floating points that are fractions like n / 255, since they
# are RGB pixels. Some low-precision floating point types in this test can't
# handle arbitrary precision floating points well.
return np.random.randint(0, 256, shape) / 255.
def test_rgb_hsv_hwc():
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((8, 8, 3))
hsv_base = np.array([
colorsys.rgb_to_hsv(
r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
for r, g, b in rgb_flat
])
hsv_base = hsv_base.reshape((8, 8, 3))
hsv_de = util.rgb_to_hsvs(rgb_np, True)
assert hsv_base.shape == hsv_de.shape
assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)
hsv_flat = hsv_base.reshape(64, 3)
rgb_base = np.array([
colorsys.hsv_to_rgb(
h.astype(np.float64), s.astype(np.float64), v.astype(np.float64))
for h, s, v in hsv_flat
])
rgb_base = rgb_base.reshape((8, 8, 3))
rgb_de = util.hsv_to_rgbs(hsv_base, True)
assert rgb_base.shape == rgb_de.shape
assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)
def test_rgb_hsv_batch_hwc():
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((4, 2, 8, 3))
hsv_base = np.array([
colorsys.rgb_to_hsv(
r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
for r, g, b in rgb_flat
])
hsv_base = hsv_base.reshape((4, 2, 8, 3))
hsv_de = util.rgb_to_hsvs(rgb_np, True)
assert hsv_base.shape == hsv_de.shape
assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)
hsv_flat = hsv_base.reshape((64, 3))
rgb_base = np.array([
colorsys.hsv_to_rgb(
h.astype(np.float64), s.astype(np.float64), v.astype(np.float64))
for h, s, v in hsv_flat
])
rgb_base = rgb_base.reshape((4, 2, 8, 3))
rgb_de = util.hsv_to_rgbs(hsv_base, True)
assert rgb_de.shape == rgb_base.shape
assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)
def test_rgb_hsv_chw():
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((3, 8, 8))
hsv_base = np.array([
np.vectorize(colorsys.rgb_to_hsv)(
rgb_np[0, :, :].astype(np.float64), rgb_np[1, :, :].astype(np.float64), rgb_np[2, :, :].astype(np.float64))
])
hsv_base = hsv_base.reshape((3, 8, 8))
hsv_de = util.rgb_to_hsvs(rgb_np, False)
assert hsv_base.shape == hsv_de.shape
assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)
rgb_base = np.array([
np.vectorize(colorsys.hsv_to_rgb)(
hsv_base[0, :, :].astype(np.float64), hsv_base[1, :, :].astype(np.float64),
hsv_base[2, :, :].astype(np.float64))
])
rgb_base = rgb_base.reshape((3, 8, 8))
rgb_de = util.hsv_to_rgbs(hsv_base, False)
assert rgb_de.shape == rgb_base.shape
assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)
def test_rgb_hsv_batch_chw():
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_imgs = rgb_flat.reshape((4, 3, 2, 8))
hsv_base_imgs = np.array([
np.vectorize(colorsys.rgb_to_hsv)(
img[0, :, :].astype(np.float64), img[1, :, :].astype(np.float64), img[2, :, :].astype(np.float64))
for img in rgb_imgs
])
hsv_de = util.rgb_to_hsvs(rgb_imgs, False)
assert hsv_base_imgs.shape == hsv_de.shape
assert_allclose(hsv_base_imgs.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)
rgb_base = np.array([
np.vectorize(colorsys.hsv_to_rgb)(
img[0, :, :].astype(np.float64), img[1, :, :].astype(np.float64), img[2, :, :].astype(np.float64))
for img in hsv_base_imgs
])
rgb_de = util.hsv_to_rgbs(hsv_base_imgs, False)
assert rgb_base.shape == rgb_de.shape
assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)
def test_rgb_hsv_pipeline():
# First dataset
transforms1 = [
vision.Decode(True),
vision.Resize([64, 64]),
vision.ToTensor()
]
transforms1 = mindspore.dataset.transforms.transforms.Compose(transforms1)
ds1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
ds1 = ds1.map(operations=transforms1, input_columns=["image"])
# Second dataset
transforms2 = [
vision.Decode(True),
vision.Resize([64, 64]),
vision.ToTensor(),
vision.RgbToHsv(),
vision.HsvToRgb()
]
transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2)
ds2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
ds2 = ds2.map(operations=transform2, input_columns=["image"])
num_iter = 0
for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1), ds2.create_dict_iterator(num_epochs=1)):
num_iter += 1
ori_img = data1["image"].asnumpy()
cvt_img = data2["image"].asnumpy()
assert_allclose(ori_img.flatten(), cvt_img.flatten(), rtol=1e-5, atol=0)
assert ori_img.shape == cvt_img.shape
if __name__ == "__main__":
test_rgb_hsv_hwc()
test_rgb_hsv_batch_hwc()
test_rgb_hsv_chw()
test_rgb_hsv_batch_chw()
test_rgb_hsv_pipeline()
| generate_numpy_random_rgb |
app-router.ts | import {Container} from 'aurelia-dependency-injection';
import {History} from 'aurelia-history';
import {Router} from './router';
import {PipelineProvider} from './pipeline-provider';
import {isNavigationCommand} from './navigation-commands';
import {EventAggregator} from 'aurelia-event-aggregator';
export class AppRouter extends Router {
pipelineProvider;
events;
history;
queue;
isNavigating;
isActive;
container;
options;
static inject(){ return [Container, History, PipelineProvider, EventAggregator]; }
constructor(container, history, pipelineProvider, events) {
super(container, history);
this.pipelineProvider = pipelineProvider;
document.addEventListener('click', handleLinkClick.bind(this), true);
this.events = events;
}
get isRoot() {
return true;
}
loadUrl(url) {
return this.createNavigationInstruction(url).
then(instruction => this.queueInstruction(instruction)).
catch(error => {
console.error(error);
if (this.history.previousFragment) {
this.navigate(this.history.previousFragment, false);
}
});
}
queueInstruction(instruction) {
return new Promise(resolve => {
instruction.resolve = resolve;
this.queue.unshift(instruction);
this.dequeueInstruction();
});
}
dequeueInstruction() {
if (this.isNavigating) {
return;
}
var instruction = this.queue.shift();
this.queue = [];
if (!instruction) {
return;
}
this.isNavigating = true;
this.events.publish('router:navigation:processing', instruction);
var context = this.createNavigationContext(instruction);
var pipeline = this.pipelineProvider.createPipeline(context);
pipeline.run(context).then(result => {
this.isNavigating = false;
if (result.completed) {
this.history.previousFragment = instruction.fragment;
}
if (result.output instanceof Error) {
console.error(result.output);
this.events.publish('router:navigation:error', { instruction, result });
}
if (isNavigationCommand(result.output)) {
result.output.navigate(this);
} else if (!result.completed) {
this.navigate(this.history.previousFragment || '', false);
this.events.publish('router:navigation:cancelled', instruction)
}
instruction.resolve(result);
this.dequeueInstruction();
})
.then(result => this.events.publish('router:navigation:complete', instruction))
.catch(error => {
console.error(error);
});
}
registerViewPort(viewPort, name) {
super.registerViewPort(viewPort, name);
if (!this.isActive) {
if('configureRouter' in this.container.viewModel){
var result = this.container.viewModel.configureRouter() || Promise.resolve();
return result.then(() => this.activate());
}else{
this.activate();
}
} else {
this.dequeueInstruction();
}
}
activate(options?) {
if (this.isActive) {
return;
}
this.isActive = true;
this.options = Object["assign"]({ routeHandler: this.loadUrl.bind(this) }, this.options, options);
this.history.activate(this.options);
this.dequeueInstruction();
}
deactivate() {
this.isActive = false;
this.history.deactivate();
}
reset() {
super.reset();
this.queue = [];
this.options = null;
}
}
function findAnchor(el) {
while (el) {
if (el.tagName === "A") return el;
el = el.parentNode;
}
}
function | (evt) {
if (!this.isActive) {
return;
}
var target = findAnchor(evt.target);
if (!target) {
return;
}
if (this.history._hasPushState) {
if (!evt.altKey && !evt.ctrlKey && !evt.metaKey && !evt.shiftKey && targetIsThisWindow(target)) {
var href = target.getAttribute('href');
// Ensure the protocol is not part of the URL, meaning it's relative.
// Stop the event bubbling to ensure the link will not cause a page refresh.
if (href !== null && !(href.charAt(0) === "#" || (/^[a-z]+:/i).test(href))) {
evt.preventDefault();
this.history.navigate(href);
}
}
}
}
function targetIsThisWindow(target) {
var targetWindow = target.getAttribute('target');
return !targetWindow ||
targetWindow === window.name ||
targetWindow === '_self' ||
(targetWindow === 'top' && window === window.top);
}
| handleLinkClick |
buck.py | # Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import abc
import collections
import contextlib
import functools
import imp
import inspect
import json
import optparse
import os
import os.path
import platform
import re
import sys
import time
import traceback
import types
from pathlib import Path, PurePath
from select import select as _select
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Pattern,
Set,
Tuple,
TypeVar,
Union,
)
import pywatchman
from pywatchman import WatchmanError
from six import PY3, iteritems, itervalues, string_types
# Python 2.6, 2.7, use iterator filter from Python 3
from six.moves import builtins, filter
from .deterministic_set import DeterministicSet
from .glob_internal import glob_internal
from .glob_watchman import SyncCookieState, glob_watchman
from .json_encoder import BuckJSONEncoder
from .module_whitelist import ImportWhitelistManager
from .profiler import Profiler, Tracer, emit_trace, scoped_trace, traced
from .select_support import SelectorList, SelectorValue
from .struct import create_struct_class, struct
from .util import (
Diagnostic,
cygwin_adjusted_path,
get_caller_frame,
is_in_dir,
is_special,
)
# When build files are executed, the functions in this file tagged with
# @provide_for_build will be provided in the build file's local symbol table.
# Those tagged with @provide_as_native_rule will be present unless
# explicitly disabled by parser.native_rules_enabled_in_build_files
#
# When these functions are called from a build file, they will be passed
# a keyword parameter, build_env, which is a object with information about
# the environment of the build file which is currently being processed.
# It contains the following attributes:
#
# "dirname" - The directory containing the build file.
#
# "base_path" - The base path of the build file.
#
# "cell_name" - The cell name the build file is in.
BUILD_FUNCTIONS = [] # type: List[Callable]
NATIVE_FUNCTIONS = [] # type: List[Callable]
# Wait this many seconds on recv() or send() in the pywatchman client
# if not otherwise specified in .buckconfig
DEFAULT_WATCHMAN_QUERY_TIMEOUT = 60.0 # type: float
# Globals that should not be copied from one module into another
_HIDDEN_GLOBALS = {"include_defs", "load"} # type: Set[str]
ORIGINAL_IMPORT = builtins.__import__
_LOAD_TARGET_PATH_RE = re.compile(
r"^(?P<root>(?P<cell>@?[\w\-.]+)?//)?(?P<package>.*):(?P<target>.*)$"
) # type: Pattern[str]
# matches anything equivalent to recursive glob on all dirs
# e.g. "**/", "*/**/", "*/*/**/"
_RECURSIVE_GLOB_PATTERN = re.compile(r"^(\*/)*\*\*/") # type: Pattern[str]
class AbstractContext(object):
"""Superclass of execution contexts."""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def includes(self):
# type: () -> Set[str]
raise NotImplementedError()
@abc.abstractproperty
def used_configs(self):
# type: () -> Dict[str, Dict[str, str]]
raise NotImplementedError()
@abc.abstractproperty
def used_env_vars(self):
# type: () -> Dict[str, str]
raise NotImplementedError()
@abc.abstractproperty
def diagnostics(self):
# type: () -> List[Diagnostic]
raise NotImplementedError()
def merge(self, other):
# type: (AbstractContext) -> None
"""Merge the context of an included file into the current context.
:param AbstractContext other: the include context to merge.
:rtype: None
"""
self.includes.update(other.includes)
self.diagnostics.extend(other.diagnostics)
self.used_configs.update(other.used_configs)
self.used_env_vars.update(other.used_env_vars)
class BuildFileContext(AbstractContext):
"""The build context used when processing a build file."""
def __init__(
self,
project_root,
base_path,
path,
dirname,
cell_name,
allow_empty_globs,
ignore_paths,
watchman_client,
watchman_watch_root,
watchman_project_prefix,
sync_cookie_state,
watchman_glob_stat_results,
watchman_use_glob_generator,
implicit_package_symbols,
):
self.globals = {}
self._includes = set()
self._used_configs = collections.defaultdict(dict)
self._used_env_vars = {}
self._diagnostics = []
self.rules = {}
self.project_root = project_root
self.base_path = base_path
self.path = path
self.cell_name = cell_name
self.dirname = dirname
self.allow_empty_globs = allow_empty_globs
self.ignore_paths = ignore_paths
self.watchman_client = watchman_client
self.watchman_watch_root = watchman_watch_root
self.watchman_project_prefix = watchman_project_prefix
self.sync_cookie_state = sync_cookie_state
self.watchman_glob_stat_results = watchman_glob_stat_results
self.watchman_use_glob_generator = watchman_use_glob_generator
self.implicit_package_symbols = implicit_package_symbols
@property
def includes(self):
return self._includes
@property
def used_configs(self):
return self._used_configs
@property
def used_env_vars(self):
return self._used_env_vars
@property
def diagnostics(self):
return self._diagnostics
class IncludeContext(AbstractContext):
"""The build context used when processing an include."""
def __init__(self, cell_name, path):
# type: (str, str) -> None
"""
:param cell_name: the cell name of the current context. Note that this cell name can be
different from the one the BUCK file is evaluated in, since it can load extension files
from other cells, which should resolve their loads relative to their own location.
"""
self.cell_name = cell_name
self.path = path
self.globals = {}
self._includes = set()
self._used_configs = collections.defaultdict(dict)
self._used_env_vars = {}
self._diagnostics = []
@property
def includes(self):
return self._includes
@property
def used_configs(self):
return self._used_configs
@property
def used_env_vars(self):
return self._used_env_vars
@property
def diagnostics(self):
return self._diagnostics
# Generic context type that should be used in places where return and parameter
# types are the same but could be either of the concrete contexts.
_GCT = TypeVar("_GCT", IncludeContext, BuildFileContext)
LoadStatement = Dict[str, Union[str, Dict[str, str]]]
BuildInclude = collections.namedtuple("BuildInclude", ["cell_name", "path"])
class LazyBuildEnvPartial(object):
"""Pairs a function with a build environment in which it will be executed.
Note that while the function is specified via the constructor, the build
environment must be assigned after construction, for the build environment
currently being used.
To call the function with its build environment, use the invoke() method of
this class, which will forward the arguments from invoke() to the
underlying function.
"""
def __init__(self, func):
# type: (Callable) -> None
self.func = func
self.build_env = None
def invoke(self, *args, **kwargs):
"""Invokes the bound function injecting 'build_env' into **kwargs."""
updated_kwargs = kwargs.copy()
updated_kwargs.update({"build_env": self.build_env})
try:
return self.func(*args, **updated_kwargs)
except TypeError:
missing_args, extra_args = get_mismatched_args(
self.func, args, updated_kwargs
)
if missing_args or extra_args:
name = "[missing]"
if "name" in updated_kwargs:
name = updated_kwargs["name"]
elif len(args) > 0:
# Optimistically hope that name is the first arg. It generally is...
name = args[0]
raise IncorrectArgumentsException(
self.func.func_name, name, missing_args, extra_args
)
raise
HostInfoOs = collections.namedtuple(
"HostInfoOs", ["is_linux", "is_macos", "is_windows", "is_freebsd", "is_unknown"]
)
HostInfoArch = collections.namedtuple(
"HostInfoArch",
[
"is_aarch64",
"is_arm",
"is_armeb",
"is_i386",
"is_mips",
"is_mips64",
"is_mipsel",
"is_mipsel64",
"is_powerpc",
"is_ppc64",
"is_unknown",
"is_x86_64",
],
)
HostInfo = collections.namedtuple("HostInfo", ["os", "arch"])
__supported_oses = {
"darwin": "macos",
"windows": "windows",
"linux": "linux",
"freebsd": "freebsd",
} # type: Dict[str, str]
# Pulled from com.facebook.buck.util.environment.Architecture.java as
# possible values. amd64 and arm64 are remapped, but they may not
# actually be present on most systems
__supported_archs = {
"aarch64": "aarch64",
"arm": "arm",
"armeb": "armeb",
"i386": "i386",
"mips": "mips",
"mips64": "mips64",
"mipsel": "mipsel",
"mipsel64": "mipsel64",
"powerpc": "powerpc",
"ppc64": "ppc64",
"unknown": "unknown",
"x86_64": "x86_64",
"amd64": "x86_64",
"arm64": "aarch64",
} # type: Dict[str, str]
def host_info(platform_system=platform.system, platform_machine=platform.machine):
host_arch = __supported_archs.get(platform_machine().lower(), "unknown")
host_os = __supported_oses.get(platform_system().lower(), "unknown")
return HostInfo(
os=HostInfoOs(
is_linux=(host_os == "linux"),
is_macos=(host_os == "macos"),
is_windows=(host_os == "windows"),
is_freebsd=(host_os == "freebsd"),
is_unknown=(host_os == "unknown"),
),
arch=HostInfoArch(
is_aarch64=(host_arch == "aarch64"),
is_arm=(host_arch == "arm"),
is_armeb=(host_arch == "armeb"),
is_i386=(host_arch == "i386"),
is_mips=(host_arch == "mips"),
is_mips64=(host_arch == "mips64"),
is_mipsel=(host_arch == "mipsel"),
is_mipsel64=(host_arch == "mipsel64"),
is_powerpc=(host_arch == "powerpc"),
is_ppc64=(host_arch == "ppc64"),
is_unknown=(host_arch == "unknown"),
is_x86_64=(host_arch == "x86_64"),
),
)
_cached_host_info = host_info()
def get_mismatched_args(func, actual_args, actual_kwargs):
argspec = inspect.getargspec(func)
required_args = set()
all_acceptable_args = []
for i, arg in enumerate(argspec.args):
if i < (len(argspec.args) - len(argspec.defaults)):
required_args.add(arg)
all_acceptable_args.append(arg)
extra_kwargs = set(actual_kwargs) - set(all_acceptable_args)
for k in set(actual_kwargs) - extra_kwargs:
all_acceptable_args.remove(k)
not_supplied_args = all_acceptable_args[len(actual_args) :]
missing_args = [arg for arg in not_supplied_args if arg in required_args]
return missing_args, sorted(list(extra_kwargs))
class IncorrectArgumentsException(TypeError):
def __init__(self, func_name, name_arg, missing_args, extra_args):
self.missing_args = missing_args
self.extra_args = extra_args
message = "Incorrect arguments to %s with name %s:" % (func_name, name_arg)
if missing_args:
message += " Missing required args: %s" % (", ".join(missing_args),)
if extra_args:
message += " Extra unknown kwargs: %s" % (", ".join(extra_args),)
super(IncorrectArgumentsException, self).__init__(message)
class BuildFileFailError(Exception):
pass
def provide_as_native_rule(func):
# type: (Callable) -> Callable
NATIVE_FUNCTIONS.append(func)
return func
def provide_for_build(func):
# type: (Callable) -> Callable
BUILD_FUNCTIONS.append(func)
return func
def add_rule(rule, build_env):
# type: (Dict, BuildFileContext) -> None
"""Record a rule in the current context.
This should be invoked by rule functions generated by the Java code.
:param dict rule: dictionary of the rule's fields.
:param build_env: the current context.
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `{}()` at the top-level of an included file.".format(
rule["buck.type"]
)
# Include the base path of the BUCK file so the reader consuming this
# output will know which BUCK file the rule came from.
if "name" not in rule:
raise ValueError("rules must contain the field 'name'. Found %s." % rule)
rule_name = rule["name"]
if not isinstance(rule_name, string_types):
raise ValueError("rules 'name' field must be a string. Found %s." % rule_name)
if rule_name in build_env.rules:
raise ValueError(
"Duplicate rule definition '%s' found. Found %s and %s"
% (rule_name, rule, build_env.rules[rule_name])
)
rule["buck.base_path"] = build_env.base_path
build_env.rules[rule_name] = rule
@traced(stats_key="Glob")
def | (
includes, excludes=None, include_dotfiles=False, build_env=None, search_base=None
):
# type: (List[str], Optional[List[str]], bool, BuildFileContext, str) -> List[str]
if excludes is None:
excludes = []
assert isinstance(
build_env, BuildFileContext
), "Cannot use `glob()` at the top-level of an included file."
# Ensure the user passes lists of strings rather than just a string.
assert not isinstance(
includes, string_types
), "The first argument to glob() must be a list of strings."
assert not isinstance(
excludes, string_types
), "The excludes argument must be a list of strings."
if search_base is None:
search_base = Path(build_env.dirname)
if build_env.dirname == build_env.project_root and any(
_RECURSIVE_GLOB_PATTERN.match(pattern) for pattern in includes
):
fail(
"Recursive globs are prohibited at top-level directory", build_env=build_env
)
results = None
if not includes:
results = []
elif build_env.watchman_client:
results = glob_watchman(
includes,
excludes,
include_dotfiles,
build_env.base_path,
build_env.watchman_watch_root,
build_env.watchman_project_prefix,
build_env.sync_cookie_state,
build_env.watchman_client,
build_env.diagnostics,
build_env.watchman_glob_stat_results,
build_env.watchman_use_glob_generator,
)
if results:
# glob should consistently return paths of type str, but
# watchman client returns unicode in Python 2 instead.
# Extra check is added to make this conversion resilient to
# watchman API changes.
results = [
res.encode("utf-8") if not isinstance(res, str) else res
for res in results
]
if results is None:
results = glob_internal(
includes,
excludes,
build_env.ignore_paths,
include_dotfiles,
search_base,
build_env.project_root,
)
assert build_env.allow_empty_globs or results, (
"glob(includes={includes}, excludes={excludes}, include_dotfiles={include_dotfiles}) "
+ "returned no results. (allow_empty_globs is set to false in the Buck "
+ "configuration)"
).format(includes=includes, excludes=excludes, include_dotfiles=include_dotfiles)
return results
def merge_maps(*header_maps):
result = {}
for header_map in header_maps:
for key in header_map:
if key in result and result[key] != header_map[key]:
assert False, (
"Conflicting header files in header search paths. "
+ '"%s" maps to both "%s" and "%s".'
% (key, result[key], header_map[key])
)
result[key] = header_map[key]
return result
def single_subdir_glob(
dirpath, glob_pattern, excludes=None, prefix=None, build_env=None, search_base=None
):
if excludes is None:
excludes = []
results = {}
files = glob(
[os.path.join(dirpath, glob_pattern)],
excludes=excludes,
build_env=build_env,
search_base=search_base,
)
for f in files:
if dirpath:
key = f[len(dirpath) + 1 :]
else:
key = f
if prefix:
# `f` is a string, but we need to create correct platform-specific Path.
# This method is called by tests for both posix style paths and
# windows style paths.
# When running tests, search_base is always set
# and happens to have the correct platform-specific Path type.
cls = PurePath if not search_base else type(search_base)
key = str(cls(prefix) / cls(key))
results[key] = f
return results
def subdir_glob(
glob_specs, excludes=None, prefix=None, build_env=None, search_base=None
):
"""
Given a list of tuples in the form (relative-sub-directory, glob-pattern),
return a dict of sub-directory relative paths to full paths. Useful for
defining header maps for C/C++ libraries which should be relative to the given
sub-directory.
If prefix is not None, it is prepended to each key in the dictionary.
"""
if excludes is None:
excludes = []
results = []
for dirpath, glob_pattern in glob_specs:
results.append(
single_subdir_glob(
dirpath, glob_pattern, excludes, prefix, build_env, search_base
)
)
return merge_maps(*results)
def _get_package_name(func_name, build_env=None):
"""The name of the package being evaluated.
For example, in the BUCK file "some/package/BUCK", its value will be
"some/package".
If the BUCK file calls a function defined in a *.bzl file, package_name()
will return the package of the calling BUCK file. For example, if there is
a BUCK file at "some/package/BUCK" and "some/other/package/ext.bzl"
extension file, when BUCK file calls a function inside of ext.bzl file
it will still return "some/package" and not "some/other/package".
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
assert isinstance(build_env, BuildFileContext), (
"Cannot use `%s()` at the top-level of an included file." % func_name
)
return build_env.base_path
@provide_for_build
def get_base_path(build_env=None):
"""Get the base path to the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
return _get_package_name("get_base_path", build_env=build_env)
@provide_for_build
def package_name(build_env=None):
"""The name of the package being evaluated.
For example, in the BUCK file "some/package/BUCK", its value will be
"some/package".
If the BUCK file calls a function defined in a *.bzl file, package_name()
will return the package of the calling BUCK file. For example, if there is
a BUCK file at "some/package/BUCK" and "some/other/package/ext.bzl"
extension file, when BUCK file calls a function inside of ext.bzl file
it will still return "some/package" and not "some/other/package".
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
return _get_package_name("package_name", build_env=build_env)
@provide_for_build
def fail(message, attr=None, build_env=None):
"""Raises a parse error.
:param message: Error message to display for the user.
The object is converted to a string.
:param attr: Optional name of the attribute that caused the error.
"""
attribute_prefix = "attribute " + attr + ": " if attr is not None else ""
msg = attribute_prefix + str(message)
raise BuildFileFailError(msg)
@provide_for_build
def get_cell_name(build_env=None):
"""Get the cell name of the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "cell". The return value will be "" if
the build file does not have a cell
:rtype: str
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `get_cell_name()` at the top-level of an included file."
return build_env.cell_name
@provide_for_build
def select(conditions, no_match_message=None, build_env=None):
"""Allows to provide a configurable value for an attribute"""
return SelectorList([SelectorValue(conditions, no_match_message)])
@provide_as_native_rule
def repository_name(build_env=None):
"""
Get the repository (cell) name of the build file that was initially
evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "@cell". The return value will be "@" if
the build file is in the main (standalone) repository.
:rtype: str
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `repository_name()` at the top-level of an included file."
return "@" + build_env.cell_name
@provide_as_native_rule
def rule_exists(name, build_env=None):
"""
:param name: name of the build rule
:param build_env: current build environment
:return: True if a rule with provided name has already been defined in
current file.
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `rule_exists()` at the top-level of an included file."
return name in build_env.rules
def flatten_list_of_dicts(list_of_dicts):
"""Flatten the given list of dictionaries by merging l[1:] onto
l[0], one at a time. Key/Value pairs which appear in later list entries
will override those that appear in earlier entries
:param list_of_dicts: the list of dict objects to flatten.
:return: a single dict containing the flattened list
"""
return_value = {}
for d in list_of_dicts:
for k, v in iteritems(d):
return_value[k] = v
return return_value
@provide_for_build
def flatten_dicts(*args, **_):
"""Flatten the given list of dictionaries by merging args[1:] onto
args[0], one at a time.
:param *args: the list of dict objects to flatten.
:param **_: ignore the build_env kwarg
:return: a single dict containing the flattened list
"""
return flatten_list_of_dicts(args)
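# A minimal sketch (not part of the original module) illustrating the merge
# order documented above: key/value pairs from later dictionaries override
# earlier ones; the dictionaries are arbitrary examples.
def _example_flatten_dicts_usage():
    merged = flatten_dicts({"a": 1, "b": 2}, {"b": 3, "c": 4})
    # Later entries win, so "b" ends up as 3.
    assert merged == {"a": 1, "b": 3, "c": 4}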
@provide_for_build
def depset(elements, build_env=None):
"""Creates an instance of sets with deterministic iteration order.
:param elements: the list of elements constituting the returned depset.
:rtype: DeterministicSet
"""
return DeterministicSet(elements)
GENDEPS_SIGNATURE = re.compile(
r"^#@# GENERATED FILE: DO NOT MODIFY ([a-f0-9]{40}) #@#\n$"
)
class BuildFileProcessor(object):
"""Handles the processing of a single build file.
:type _current_build_env: AbstractContext | None
"""
SAFE_MODULES_CONFIG = {
"os": ["environ", "getenv", "path", "sep", "pathsep", "linesep"],
"os.path": [
"basename",
"commonprefix",
"dirname",
"isabs",
"join",
"normcase",
"relpath",
"split",
"splitdrive",
"splitext",
"sep",
"pathsep",
],
"pipes": ["quote"],
}
def __init__(
self,
project_root,
cell_roots,
cell_name,
build_file_name,
allow_empty_globs,
watchman_client,
watchman_glob_stat_results,
watchman_use_glob_generator,
project_import_whitelist=None,
implicit_includes=None,
extra_funcs=None,
configs=None,
env_vars=None,
ignore_paths=None,
disable_implicit_native_rules=False,
warn_about_deprecated_syntax=True,
):
if project_import_whitelist is None:
project_import_whitelist = []
if implicit_includes is None:
implicit_includes = []
if extra_funcs is None:
extra_funcs = []
if configs is None:
configs = {}
if env_vars is None:
env_vars = {}
if ignore_paths is None:
ignore_paths = []
self._include_cache = {}
self._current_build_env = None
self._sync_cookie_state = SyncCookieState()
self._project_root = project_root
self._cell_roots = cell_roots
self._cell_name = cell_name
self._build_file_name = build_file_name
self._implicit_includes = implicit_includes
self._allow_empty_globs = allow_empty_globs
self._watchman_client = watchman_client
self._watchman_glob_stat_results = watchman_glob_stat_results
self._watchman_use_glob_generator = watchman_use_glob_generator
self._configs = configs
self._env_vars = env_vars
self._ignore_paths = ignore_paths
self._disable_implicit_native_rules = disable_implicit_native_rules
self._warn_about_deprecated_syntax = warn_about_deprecated_syntax
lazy_global_functions = {}
lazy_native_functions = {}
for func in BUILD_FUNCTIONS + extra_funcs:
func_with_env = LazyBuildEnvPartial(func)
lazy_global_functions[func.__name__] = func_with_env
for func in NATIVE_FUNCTIONS:
func_with_env = LazyBuildEnvPartial(func)
lazy_native_functions[func.__name__] = func_with_env
self._global_functions = lazy_global_functions
self._native_functions = lazy_native_functions
self._native_module_class_for_extension = self._create_native_module_class(
self._global_functions, self._native_functions
)
self._native_module_class_for_build_file = self._create_native_module_class(
self._global_functions,
[] if self._disable_implicit_native_rules else self._native_functions,
)
self._import_whitelist_manager = ImportWhitelistManager(
import_whitelist=self._create_import_whitelist(project_import_whitelist),
safe_modules_config=self.SAFE_MODULES_CONFIG,
path_predicate=lambda path: is_in_dir(path, self._project_root),
)
# Set of helpers callable from the child environment.
self._default_globals_for_extension = self._create_default_globals(False, False)
self._default_globals_for_implicit_include = self._create_default_globals(
False, True
)
self._default_globals_for_build_file = self._create_default_globals(True, False)
def _create_default_globals(self, is_build_file, is_implicit_include):
# type: (bool) -> Dict[str, Callable]
return {
"include_defs": functools.partial(self._include_defs, is_implicit_include),
"add_build_file_dep": self._add_build_file_dep,
"read_config": self._read_config,
"implicit_package_symbol": self._implicit_package_symbol,
"allow_unsafe_import": self._import_whitelist_manager.allow_unsafe_import,
"glob": self._glob,
"subdir_glob": self._subdir_glob,
"load": functools.partial(self._load, is_implicit_include),
"struct": struct,
"provider": self._provider,
"host_info": self._host_info,
"native": self._create_native_module(is_build_file=is_build_file),
}
def _create_native_module(self, is_build_file):
"""
Creates a native module exposing built-in Buck rules.
This module allows clients to refer to built-in Buck rules using
"native.<native_rule>" syntax in their build files. For example,
"native.java_library(...)" will use a native Java library rule.
:return: 'native' module struct.
"""
native_globals = {}
self._install_builtins(native_globals, force_native_rules=not is_build_file)
assert "glob" not in native_globals
assert "host_info" not in native_globals
assert "implicit_package_symbol" not in native_globals
assert "read_config" not in native_globals
native_globals["glob"] = self._glob
native_globals["host_info"] = self._host_info
native_globals["implicit_package_symbol"] = self._implicit_package_symbol
native_globals["read_config"] = self._read_config
return (
self._native_module_class_for_build_file(**native_globals)
if is_build_file
else self._native_module_class_for_extension(**native_globals)
)
@staticmethod
def _create_native_module_class(global_functions, native_functions):
"""
Creates a native module class.
:return: namedtuple instance for native module
"""
return collections.namedtuple(
"native",
list(global_functions)
+ list(native_functions)
+ ["glob", "host_info", "read_config", "implicit_package_symbol"],
)
def _wrap_env_var_read(self, read, real):
"""
Return wrapper around function that reads an environment variable so
that the read is recorded.
"""
@functools.wraps(real)
def wrapper(varname, *arg, **kwargs):
self._record_env_var(varname, read(varname))
return real(varname, *arg, **kwargs)
# Save the real function for restoration.
wrapper._real = real
return wrapper
@contextlib.contextmanager
def _with_env_interceptor(self, read, obj, *attrs):
"""
Wrap a function, found at `obj.attr`, that reads an environment
variable in a new function which records the env var read.
"""
orig = []
for attr in attrs:
real = getattr(obj, attr)
wrapped = self._wrap_env_var_read(read, real)
setattr(obj, attr, wrapped)
orig.append((attr, real))
try:
yield
finally:
for attr, real in orig:
setattr(obj, attr, real)
@contextlib.contextmanager
def with_env_interceptors(self):
"""
Install environment variable read interceptors into all known ways that
a build file can access the environment.
"""
# Use a copy of the env to provide a function to get at the low-level
# environment. The wrappers will use this when recording the env var.
read = dict(os.environ).get
# Install interceptors into the main ways a user can read the env.
with self._with_env_interceptor(
read, os.environ, "__contains__", "__getitem__", "get"
):
yield
@staticmethod
def _merge_explicit_globals(src, dst, whitelist=None, whitelist_mapping=None):
# type: (types.ModuleType, Dict[str, Any], Tuple[str], Dict[str, str]) -> None
"""Copy explicitly requested global definitions from one globals dict to another.
If whitelist is set, only globals from the whitelist will be pulled in.
If whitelist_mapping is set, globals will be exported under the name of the keyword. For
        example, foo="bar" means that a variable named "bar" in the imported file will be
        available as "foo" in the current file.
"""
if whitelist is not None:
for symbol in whitelist:
if symbol not in src.__dict__:
raise KeyError('"%s" is not defined in %s' % (symbol, src.__name__))
dst[symbol] = src.__dict__[symbol]
if whitelist_mapping is not None:
for exported_name, symbol in iteritems(whitelist_mapping):
if symbol not in src.__dict__:
raise KeyError('"%s" is not defined in %s' % (symbol, src.__name__))
dst[exported_name] = src.__dict__[symbol]
def _merge_globals(self, mod, dst):
# type: (types.ModuleType, Dict[str, Any]) -> None
"""Copy the global definitions from one globals dict to another.
Ignores special attributes and attributes starting with '_', which
typically denote module-level private attributes.
"""
keys = getattr(mod, "__all__", mod.__dict__.keys())
for key in keys:
# Block copying modules unless they were specified in '__all__'
block_copying_module = not hasattr(mod, "__all__") and isinstance(
mod.__dict__[key], types.ModuleType
)
if (
not key.startswith("_")
and key not in _HIDDEN_GLOBALS
and not block_copying_module
):
dst[key] = mod.__dict__[key]
def _update_functions(self, build_env):
"""
Updates the build functions to use the given build context when called.
"""
for function in itervalues(self._global_functions):
function.build_env = build_env
for function in itervalues(self._native_functions):
function.build_env = build_env
def _install_builtins(self, namespace, force_native_rules=False):
"""
Installs the build functions, by their name, into the given namespace.
"""
for name, function in iteritems(self._global_functions):
namespace[name] = function.invoke
if not self._disable_implicit_native_rules or force_native_rules:
for name, function in iteritems(self._native_functions):
namespace[name] = function.invoke
@contextlib.contextmanager
def with_builtins(self, namespace):
"""
Installs the build functions for the duration of a `with` block.
"""
original_namespace = namespace.copy()
self._install_builtins(namespace)
try:
yield
finally:
namespace.clear()
namespace.update(original_namespace)
def _resolve_include(self, name):
# type: (str) -> BuildInclude
"""Resolve the given include def name to a BuildInclude metadata."""
match = re.match(r"^([A-Za-z0-9_]*)//(.*)$", name)
if match is None:
raise ValueError(
"include_defs argument {} should be in the form of "
"//path or cellname//path".format(name)
)
cell_name = match.group(1)
relative_path = match.group(2)
if len(cell_name) > 0:
cell_root = self._cell_roots.get(cell_name)
if cell_root is None:
raise KeyError(
"include_defs argument {} references an unknown cell named {} "
"known cells: {!r}".format(name, cell_name, self._cell_roots)
)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(cell_root, relative_path)),
)
else:
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(self._project_root, relative_path)),
)
def _get_load_path(self, label):
# type: (str) -> BuildInclude
"""Resolve the given load function label to a BuildInclude metadata."""
match = _LOAD_TARGET_PATH_RE.match(label)
if match is None:
raise ValueError(
"load label {} should be in the form of "
"//path:file or cellname//path:file".format(label)
)
cell_name = match.group("cell")
if cell_name:
if cell_name.startswith("@"):
cell_name = cell_name[1:]
elif self._warn_about_deprecated_syntax:
self._emit_warning(
'{} has a load label "{}" that uses a deprecated cell format. '
'"{}" should instead be "@{}".'.format(
self._current_build_env.path, label, cell_name, cell_name
),
"load function",
)
else:
cell_name = self._current_build_env.cell_name
relative_path = match.group("package")
file_name = match.group("target")
label_root = match.group("root")
if not label_root:
# relative include. e.g. :foo.bzl
if "/" in file_name:
raise ValueError(
"Relative loads work only for files in the same directory. "
+ "Please use absolute label instead ([cell]//pkg[/pkg]:target)."
)
callee_dir = os.path.dirname(self._current_build_env.path)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(callee_dir, file_name)),
)
elif cell_name:
cell_root = self._cell_roots.get(cell_name)
if cell_root is None:
raise KeyError(
"load label {} references an unknown cell named {} "
"known cells: {!r}".format(label, cell_name, self._cell_roots)
)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(
os.path.join(cell_root, relative_path, file_name)
),
)
else:
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(
os.path.join(self._project_root, relative_path, file_name)
),
)
def _read_config(self, section, field, default=None):
# type: (str, str, Any) -> Any
"""
Lookup a setting from `.buckconfig`.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Lookup the value and record it in this build file's context.
key = section, field
value = self._configs.get(key)
if value is not None and not isinstance(value, str):
# Python 2 returns unicode values from parsed JSON configs, but
# only str types should be exposed to clients
value = value.encode("utf-8")
# replace raw values to avoid decoding for frequently used configs
self._configs[key] = value
build_env.used_configs[section][field] = value
# If no config setting was found, return the default.
if value is None:
return default
return value
def _implicit_package_symbol(self, symbol, default=None):
# type: (str, Any) -> Any
"""
Gives access to a symbol that has been implicitly loaded for the package of the
build file that is currently being evaluated. If the symbol was not present,
`default` will be returned.
"""
build_env = self._current_build_env
return build_env.implicit_package_symbols.get(symbol, default)
def _glob(
self,
includes,
excludes=None,
include_dotfiles=False,
search_base=None,
exclude=None,
):
assert exclude is None or excludes is None, (
"Mixing 'exclude' and 'excludes' attributes is not allowed. Please replace your "
"exclude and excludes arguments with a single 'excludes = %r'."
% (exclude + excludes)
)
excludes = excludes or exclude
build_env = self._current_build_env # type: BuildFileContext
return glob(
includes,
excludes=excludes,
include_dotfiles=include_dotfiles,
search_base=search_base,
build_env=build_env,
)
def _subdir_glob(self, glob_specs, excludes=None, prefix=None, search_base=None):
build_env = self._current_build_env
return subdir_glob(
glob_specs,
excludes=excludes,
prefix=prefix,
search_base=search_base,
build_env=build_env,
)
def _record_env_var(self, name, value):
# type: (str, Any) -> None
"""
Record a read of an environment variable.
This method is meant to wrap methods in `os.environ` when called from
any files or includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Lookup the value and record it in this build file's context.
build_env.used_env_vars[name] = value
def _called_from_project_file(self):
# type: () -> bool
"""
Returns true if the function was called from a project file.
"""
frame = get_caller_frame(skip=[__name__])
filename = inspect.getframeinfo(frame).filename
return is_in_dir(filename, self._project_root)
def _include_defs(self, is_implicit_include, name, namespace=None):
# type: (bool, str, Optional[str]) -> None
"""Pull the named include into the current caller's context.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._resolve_include(name)
inner_env, mod = self._process_include(build_include, is_implicit_include)
# Look up the caller's stack frame and merge the include's globals
        # into its symbol table.
frame = get_caller_frame(skip=["_functools", __name__])
if namespace is not None:
# If using a fresh namespace, create a fresh module to populate.
fresh_module = imp.new_module(namespace)
fresh_module.__file__ = mod.__file__
self._merge_globals(mod, fresh_module.__dict__)
frame.f_globals[namespace] = fresh_module
else:
self._merge_globals(mod, frame.f_globals)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
def _load(self, is_implicit_include, name, *symbols, **symbol_kwargs):
# type: (bool, str, *str, **str) -> None
"""Pull the symbols from the named include into the current caller's context.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
assert symbols or symbol_kwargs, "expected at least one symbol to load"
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._get_load_path(name)
inner_env, module = self._process_include(build_include, is_implicit_include)
# Look up the caller's stack frame and merge the include's globals
        # into its symbol table.
frame = get_caller_frame(skip=["_functools", __name__])
BuildFileProcessor._merge_explicit_globals(
module, frame.f_globals, symbols, symbol_kwargs
)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
def _load_package_implicit(self, build_env, package_implicit_load):
"""
Updates `build_env` to contain all symbols from `package_implicit_load`
Args:
build_env: The build environment on which to modify includes /
implicit_package_symbols properties
            package_implicit_load: A dictionary with "load_path", the first part of
a `load` statement, and "load_symbols", a dictionary
that works like the **symbols attribute of `load`
"""
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._get_load_path(package_implicit_load["load_path"])
inner_env, module = self._process_include(build_include, True)
# Validate that symbols that are requested explicitly by config are present
# in the .bzl file
for key, value in iteritems(package_implicit_load["load_symbols"]):
try:
build_env.implicit_package_symbols[key] = getattr(module, value)
except AttributeError:
raise BuildFileFailError(
"Could not find symbol '{}' in implicitly loaded extension '{}'".format(
value, package_implicit_load["load_path"]
)
)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
@staticmethod
def _provider(doc="", fields=None):
# type: (str, Union[List[str], Dict[str, str]]) -> Callable
"""Creates a declared provider factory.
The return value of this function can be used to create "struct-like"
values. Example:
SomeInfo = provider()
def foo():
return 3
info = SomeInfo(x = 2, foo = foo)
print(info.x + info.foo()) # prints 5
Optional fields can be used to restrict the set of allowed fields.
Example:
SomeInfo = provider(fields=["data"])
info = SomeInfo(data="data") # valid
info = SomeInfo(foo="bar") # runtime exception
"""
if fields:
return create_struct_class(fields)
return struct
def _add_build_file_dep(self, name):
# type: (str) -> None
"""
Explicitly specify a dependency on an external file.
For instance, this can be used to specify a dependency on an external
executable that will be invoked, or some other external configuration
file.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
cell_name, path = self._resolve_include(name)
build_env.includes.add(path)
@staticmethod
def _host_info():
return _cached_host_info
@contextlib.contextmanager
def _set_build_env(self, build_env):
# type: (AbstractContext) -> Iterator[None]
"""Set the given build context as the current context, unsetting it upon exit."""
old_env = self._current_build_env
self._current_build_env = build_env
self._update_functions(self._current_build_env)
try:
yield
finally:
self._current_build_env = old_env
self._update_functions(self._current_build_env)
def _emit_warning(self, message, source):
# type: (str, str) -> None
"""
Add a warning to the current build_env's diagnostics.
"""
if self._current_build_env is not None:
self._current_build_env.diagnostics.append(
Diagnostic(
message=message, level="warning", source=source, exception=None
)
)
@staticmethod
def _create_import_whitelist(project_import_whitelist):
# type: (List[str]) -> Set[str]
"""
Creates import whitelist by joining the global whitelist with the project specific one
defined in '.buckconfig'.
"""
global_whitelist = [
"copy",
"re",
"functools",
"itertools",
"json",
"hashlib",
"types",
"string",
"ast",
"__future__",
"collections",
"operator",
"fnmatch",
"copy_reg",
]
return set(global_whitelist + project_import_whitelist)
def _file_access_wrapper(self, real):
"""
        Return wrapper around function so that accessing a file produces a warning if it is
not a known dependency.
"""
@functools.wraps(real)
def wrapper(filename, *arg, **kwargs):
# Restore original 'open' because it is used by 'inspect.currentframe()' in
# '_called_from_project_file()'
with self._wrap_file_access(wrap=False):
if self._called_from_project_file():
path = os.path.abspath(filename)
if path not in self._current_build_env.includes:
dep_path = "//" + os.path.relpath(path, self._project_root)
warning_message = (
"Access to a non-tracked file detected! {0} is not a ".format(
path
)
+ "known dependency and it should be added using 'add_build_file_dep' "
+ "function before trying to access the file, e.g.\n"
+ "'add_build_file_dep('{0}')'\n".format(dep_path)
+ "The 'add_build_file_dep' function is documented at "
+ "https://buck.build/function/add_build_file_dep.html\n"
)
self._emit_warning(warning_message, "sandboxing")
return real(filename, *arg, **kwargs)
# Save the real function for restoration.
wrapper._real = real
return wrapper
@contextlib.contextmanager
def _wrap_fun_for_file_access(self, obj, attr, wrap=True):
"""
Wrap a function to check if accessed files are known dependencies.
"""
real = getattr(obj, attr)
if wrap:
# Don't wrap again
if not hasattr(real, "_real"):
wrapped = self._file_access_wrapper(real)
setattr(obj, attr, wrapped)
elif hasattr(real, "_real"):
# Restore real function if it was wrapped
setattr(obj, attr, real._real)
try:
yield
finally:
setattr(obj, attr, real)
def _wrap_file_access(self, wrap=True):
"""
        Wrap 'open' so that it checks whether accessed files are known dependencies.
If 'wrap' is equal to False, restore original function instead.
"""
return self._wrap_fun_for_file_access(builtins, "open", wrap)
@contextlib.contextmanager
def _build_file_sandboxing(self):
"""
Creates a context that sandboxes build file processing.
"""
with self._wrap_file_access():
with self._import_whitelist_manager.allow_unsafe_import(False):
yield
@traced(stats_key="Process")
def _process(self, build_env, path, is_implicit_include, package_implicit_load):
# type: (_GCT, str, bool, Optional[LoadStatement]) -> Tuple[_GCT, types.ModuleType]
"""Process a build file or include at the given path.
:param build_env: context of the file to process.
:param path: target-like path to the file to process.
:param is_implicit_include: whether the file being processed is an implicit include, or was
included from an implicit include.
        :param package_implicit_load: if provided, a dictionary containing the path to
load for this given package, and the symbols to load
from that .bzl file.
:returns: build context (potentially different if retrieved from cache) and loaded module.
"""
if isinstance(build_env, IncludeContext):
default_globals = (
self._default_globals_for_implicit_include
if is_implicit_include
else self._default_globals_for_extension
)
else:
default_globals = self._default_globals_for_build_file
emit_trace(path)
# Install the build context for this input as the current context.
with self._set_build_env(build_env):
# Don't include implicit includes if the current file being
# processed is an implicit include
if not is_implicit_include:
for include in self._implicit_includes:
build_include = self._resolve_include(include)
inner_env, mod = self._process_include(build_include, True)
self._merge_globals(mod, default_globals)
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
if package_implicit_load:
self._load_package_implicit(build_env, package_implicit_load)
# Build a new module for the given file, using the default globals
# created above.
module = imp.new_module(path)
module.__file__ = path
module.__dict__.update(default_globals)
# We don't open this file as binary, as we assume it's a textual source
# file.
with scoped_trace("IO", stats_key="IO"):
with self._wrap_file_access(wrap=False):
with open(path, "r") as f:
contents = f.read()
with scoped_trace("Compile", stats_key="Compile"):
# Enable absolute imports. This prevents the compiler from
# trying to do a relative import first, and warning that
# this module doesn't exist in sys.modules.
future_features = absolute_import.compiler_flag
code = compile(contents, path, "exec", future_features, 1)
# Execute code with build file sandboxing
with self._build_file_sandboxing():
exec(code, module.__dict__)
return build_env, module
def _process_include(self, build_include, is_implicit_include):
# type: (BuildInclude, bool) -> Tuple[AbstractContext, types.ModuleType]
"""Process the include file at the given path.
:param build_include: build include metadata (cell_name and path).
:param is_implicit_include: whether the file being processed is an implicit include, or was
included from an implicit include.
"""
# First check the cache.
cached = self._include_cache.get(build_include.path)
if cached is not None:
return cached
build_env = IncludeContext(
cell_name=build_include.cell_name, path=build_include.path
)
build_env, mod = self._process(
build_env,
build_include.path,
is_implicit_include=is_implicit_include,
package_implicit_load=None,
)
self._include_cache[build_include.path] = build_env, mod
return build_env, mod
def _process_build_file(
self, watch_root, project_prefix, path, package_implicit_load
):
# type: (str, str, str, Optional[LoadStatement]) -> Tuple[BuildFileContext, types.ModuleType]
"""Process the build file at the given path."""
# Create the build file context, including the base path and directory
# name of the given path.
relative_path_to_build_file = os.path.relpath(path, self._project_root).replace(
"\\", "/"
)
len_suffix = -len(self._build_file_name) - 1
base_path = relative_path_to_build_file[:len_suffix]
dirname = os.path.dirname(path)
build_env = BuildFileContext(
self._project_root,
base_path,
path,
dirname,
self._cell_name,
self._allow_empty_globs,
self._ignore_paths,
self._watchman_client,
watch_root,
project_prefix,
self._sync_cookie_state,
self._watchman_glob_stat_results,
self._watchman_use_glob_generator,
{},
)
return self._process(
build_env,
path,
is_implicit_include=False,
package_implicit_load=package_implicit_load,
)
def process(
self, watch_root, project_prefix, path, diagnostics, package_implicit_load
):
# type: (str, Optional[str], str, List[Diagnostic], Optional[LoadStatement]) -> List[Dict[str, Any]]
"""Process a build file returning a dict of its rules and includes."""
build_env, mod = self._process_build_file(
watch_root,
project_prefix,
os.path.join(self._project_root, path),
package_implicit_load=package_implicit_load,
)
# Initialize the output object to a map of the parsed rules.
values = list(itervalues(build_env.rules))
# Add in tracked included files as a special meta rule.
values.append({"__includes": [path] + sorted(build_env.includes)})
# Add in tracked used config settings as a special meta rule.
values.append({"__configs": build_env.used_configs})
# Add in used environment variables as a special meta rule.
values.append({"__env": build_env.used_env_vars})
diagnostics.extend(build_env.diagnostics)
return values
class InvalidSignatureError(Exception):
pass
def format_traceback(tb):
formatted = []
for entry in traceback.extract_tb(tb):
(filename, line_number, function_name, text) = entry
formatted.append(
{
"filename": filename,
"line_number": line_number,
"function_name": function_name,
"text": text,
}
)
return formatted
def format_exception_info(exception_info):
(exc_type, exc_value, exc_traceback) = exception_info
formatted = {
"type": exc_type.__name__,
"value": str(exc_value),
"traceback": format_traceback(exc_traceback),
}
if exc_type is SyntaxError:
formatted["filename"] = exc_value.filename
formatted["lineno"] = exc_value.lineno
formatted["offset"] = exc_value.offset
formatted["text"] = exc_value.text
return formatted
def encode_result(values, diagnostics, profile):
# type: (List[Dict[str, object]], List[Diagnostic], Optional[str]) -> str
result = {
"values": [
{k: v for k, v in iteritems(value) if v is not None} for value in values
]
}
json_encoder = BuckJSONEncoder()
if diagnostics:
encoded_diagnostics = []
for d in diagnostics:
encoded = {"message": d.message, "level": d.level, "source": d.source}
if d.exception:
encoded["exception"] = format_exception_info(d.exception)
encoded_diagnostics.append(encoded)
result["diagnostics"] = encoded_diagnostics
if profile is not None:
result["profile"] = profile
try:
return json_encoder.encode(result)
except Exception as e:
# Try again without the values
result["values"] = []
if "diagnostics" not in result:
result["diagnostics"] = []
result["diagnostics"].append(
{
"message": str(e),
"level": "fatal",
"source": "parse",
"exception": format_exception_info(sys.exc_info()),
}
)
return json_encoder.encode(result)
def process_with_diagnostics(build_file_query, build_file_processor, to_parent):
start_time = time.time()
build_file = build_file_query.get("buildFile")
watch_root = build_file_query.get("watchRoot")
project_prefix = build_file_query.get("projectPrefix")
package_implicit_load = build_file_query.get("packageImplicitLoad")
build_file = cygwin_adjusted_path(build_file)
watch_root = cygwin_adjusted_path(watch_root)
if project_prefix is not None:
project_prefix = cygwin_adjusted_path(project_prefix)
diagnostics = []
values = []
try:
values = build_file_processor.process(
watch_root,
project_prefix,
build_file,
diagnostics=diagnostics,
package_implicit_load=package_implicit_load,
)
except BaseException as e:
        # sys.exit() should not emit diagnostics.
        if not isinstance(e, SystemExit):
if isinstance(e, WatchmanError):
source = "watchman"
message = e.msg
else:
source = "parse"
message = str(e)
diagnostics.append(
Diagnostic(
message=message,
level="fatal",
source=source,
exception=sys.exc_info(),
)
)
raise
finally:
java_process_send_result(to_parent, values, diagnostics, None)
end_time = time.time()
return end_time - start_time
def java_process_send_result(to_parent, values, diagnostics, profile_result):
"""Sends result to the Java process"""
data = encode_result(values, diagnostics, profile_result)
if PY3:
# in Python 3 write expects bytes instead of string
data = data.encode("utf-8")
to_parent.write(data)
to_parent.flush()
def silent_excepthook(exctype, value, tb):
# We already handle all exceptions by writing them to the parent, so
# no need to dump them again to stderr.
pass
def _optparse_store_kv(option, opt_str, value, parser):
"""Optparse option callback which parses input as K=V, and store into dictionary.
:param optparse.Option option: Option instance
:param str opt_str: string representation of option flag
:param str value: argument value
:param optparse.OptionParser parser: parser instance
"""
result = value.split("=", 1)
if len(result) != 2:
        raise optparse.OptionError(
            "Expected argument of {} to be in the form of X=Y".format(opt_str), option
        )
(k, v) = result
# Get or create the dictionary
dest_dict = getattr(parser.values, option.dest)
if dest_dict is None:
dest_dict = {}
setattr(parser.values, option.dest, dest_dict)
dest_dict[k] = v
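# A minimal sketch (not part of the original module) of how the callback above
# turns repeated NAME=PATH flags into a dictionary; the paths and cell names
# are made up for illustration.
def _example_store_kv_usage():
    parser = optparse.OptionParser()
    parser.add_option(
        "--cell_root",
        action="callback",
        type="string",
        dest="cell_roots",
        callback=_optparse_store_kv,
        default={},
    )
    options, _args = parser.parse_args(
        ["--cell_root", "foo=/repo/foo", "--cell_root", "bar=/repo/bar"]
    )
    # Each NAME=PATH pair is split on the first "=" and stored in the dict.
    assert options.cell_roots == {"foo": "/repo/foo", "bar": "/repo/bar"}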
# Inexplicably, this script appears to run faster when the arguments passed
# into it are absolute paths. However, we want the "buck.base_path" property
# printed for each rule to be the base path of the build target that
# identifies the rule. That means that when parsing a BUCK file, we must know
# its path relative to the root of the project to produce the base path.
#
# To that end, the first argument to this script must be an absolute path to
# the project root. It must be followed by one or more absolute paths to
# BUCK files under the project root. If no paths to BUCK files are
# specified, then it will traverse the project root for BUCK files, excluding
# directories of generated files produced by Buck.
#
# All of the build rules that are parsed from the BUCK files will be printed
# to stdout encoded in JSON. That means that printing out other information
# for debugging purposes will break the JSON encoding, so be careful!
def main():
# Our parent expects to read JSON from our stdout, so if anyone
# uses print, buck will complain with a helpful "but I wanted an
# array!" message and quit. Redirect stdout to stderr so that
# doesn't happen. Actually dup2 the file handle so that writing
# to file descriptor 1, os.system, and so on work as expected too.
# w instead of a mode is used because of https://bugs.python.org/issue27805
to_parent = os.fdopen(os.dup(sys.stdout.fileno()), "wb")
os.dup2(sys.stderr.fileno(), sys.stdout.fileno())
parser = optparse.OptionParser()
parser.add_option(
"--project_root", action="store", type="string", dest="project_root"
)
parser.add_option(
"--cell_root",
action="callback",
type="string",
dest="cell_roots",
metavar="NAME=PATH",
help="Cell roots that can be referenced by includes.",
callback=_optparse_store_kv,
default={},
)
parser.add_option("--cell_name", action="store", type="string", dest="cell_name")
parser.add_option(
"--build_file_name", action="store", type="string", dest="build_file_name"
)
parser.add_option(
"--allow_empty_globs",
action="store_true",
dest="allow_empty_globs",
help="Tells the parser not to raise an error when glob returns no results.",
)
parser.add_option(
"--use_watchman_glob",
action="store_true",
dest="use_watchman_glob",
help="Invokes `watchman query` to get lists of files instead of globbing in-process.",
)
parser.add_option(
"--watchman_use_glob_generator",
action="store_true",
dest="watchman_use_glob_generator",
        help="Uses Watchman glob generator to speed up queries",
)
parser.add_option(
"--watchman_glob_stat_results",
action="store_true",
dest="watchman_glob_stat_results",
help="Invokes `stat()` to sanity check result of `watchman query`.",
)
parser.add_option(
"--watchman_socket_path",
action="store",
type="string",
dest="watchman_socket_path",
help="Path to Unix domain socket/named pipe as returned by `watchman get-sockname`.",
)
parser.add_option(
"--watchman_query_timeout_ms",
action="store",
type="int",
dest="watchman_query_timeout_ms",
help="Maximum time in milliseconds to wait for watchman query to respond.",
)
parser.add_option("--include", action="append", dest="include")
parser.add_option("--config", help="BuckConfig settings available at parse time.")
parser.add_option("--ignore_paths", help="Paths that should be ignored.")
parser.add_option(
"--quiet",
action="store_true",
dest="quiet",
help="Stifles exception backtraces printed to stderr during parsing.",
)
parser.add_option(
"--profile", action="store_true", help="Profile every buck file execution"
)
parser.add_option(
"--build_file_import_whitelist",
action="append",
dest="build_file_import_whitelist",
)
parser.add_option(
"--disable_implicit_native_rules",
action="store_true",
help="Do not allow native rules in build files, only included ones",
)
parser.add_option(
"--warn_about_deprecated_syntax",
action="store_true",
help="Warn about deprecated syntax usage.",
)
(options, args) = parser.parse_args()
# Even though project_root is absolute path, it may not be concise. For
# example, it might be like "C:\project\.\rule".
#
# Under cygwin, the project root will be invoked from buck as C:\path, but
# the cygwin python uses UNIX-style paths. They can be converted using
# cygpath, which is necessary because abspath will treat C:\path as a
# relative path.
options.project_root = cygwin_adjusted_path(options.project_root)
project_root = os.path.abspath(options.project_root)
cell_roots = {
k: os.path.abspath(cygwin_adjusted_path(v))
for k, v in iteritems(options.cell_roots)
}
watchman_client = None
if options.use_watchman_glob:
client_args = {"sendEncoding": "json", "recvEncoding": "json"}
if options.watchman_query_timeout_ms is not None:
# pywatchman expects a timeout as a nonnegative floating-point
# value in seconds.
client_args["timeout"] = max(
0.0, options.watchman_query_timeout_ms / 1000.0
)
else:
client_args["timeout"] = DEFAULT_WATCHMAN_QUERY_TIMEOUT
if options.watchman_socket_path is not None:
client_args["sockpath"] = options.watchman_socket_path
client_args["transport"] = "local"
watchman_client = pywatchman.client(**client_args)
configs = {}
if options.config is not None:
with open(options.config, "rb") as f:
for section, contents in iteritems(json.load(f)):
for field, value in iteritems(contents):
configs[(section, field)] = value
ignore_paths = []
if options.ignore_paths is not None:
with open(options.ignore_paths, "rb") as f:
ignore_paths = [make_glob(i) for i in json.load(f)]
build_file_processor = BuildFileProcessor(
project_root,
cell_roots,
options.cell_name,
options.build_file_name,
options.allow_empty_globs,
watchman_client,
options.watchman_glob_stat_results,
options.watchman_use_glob_generator,
project_import_whitelist=options.build_file_import_whitelist or [],
implicit_includes=options.include or [],
configs=configs,
ignore_paths=ignore_paths,
disable_implicit_native_rules=options.disable_implicit_native_rules,
warn_about_deprecated_syntax=options.warn_about_deprecated_syntax,
)
# While processing, we'll write exceptions as diagnostic messages
# to the parent then re-raise them to crash the process. While
# doing so, we don't want Python's default unhandled exception
# behavior of writing to stderr.
orig_excepthook = None
if options.quiet:
orig_excepthook = sys.excepthook
sys.excepthook = silent_excepthook
# Process the build files with the env var interceptors and builtins
# installed.
with build_file_processor.with_env_interceptors():
with build_file_processor.with_builtins(builtins.__dict__):
processed_build_file = []
profiler = None
if options.profile:
profiler = Profiler(True)
profiler.start()
Tracer.enable()
for build_file in args:
query = {
"buildFile": build_file,
"watchRoot": project_root,
"projectPrefix": project_root,
}
duration = process_with_diagnostics(
query, build_file_processor, to_parent
)
processed_build_file.append(
{"buildFile": build_file, "duration": duration}
)
# From https://docs.python.org/2/using/cmdline.html :
#
# Note that there is internal buffering in file.readlines()
# and File Objects (for line in sys.stdin) which is not
# influenced by this option. To work around this, you will
# want to use file.readline() inside a while 1: loop.
for line in wait_and_read_build_file_query():
if line == "":
break
build_file_query = json.loads(line)
if build_file_query.get("command") == "report_profile":
report_profile(options, to_parent, processed_build_file, profiler)
else:
duration = process_with_diagnostics(
build_file_query, build_file_processor, to_parent
)
processed_build_file.append(
{
"buildFile": build_file_query["buildFile"],
"duration": duration,
}
)
if options.quiet:
sys.excepthook = orig_excepthook
# Python tries to flush/close stdout when it quits, and if there's a dead
# pipe on the other end, it will spit some warnings to stderr. This breaks
# tests sometimes. Prevent that by explicitly catching the error.
try:
to_parent.close()
except IOError:
pass
def wait_build_file_query():
_select([sys.stdin], [], [])
def wait_and_read_build_file_query():
def default_wait():
return
wait = default_wait
if sys.platform != "win32":
# wait_build_file_query() is useful to attribute time waiting for queries.
# Since select.select() is not supported on Windows, we currently don't have
        # a reliable way to measure it on this platform, so we skip it.
wait = wait_build_file_query
while True:
wait()
line = sys.stdin.readline()
if not line:
return
yield line
def report_profile(options, to_parent, processed_build_file, profiler):
if options.profile:
try:
profiler.stop()
profile_result = profiler.generate_report()
extra_result = "Total: {:.2f} sec\n\n\n".format(profiler.total_time)
extra_result += "# Parsed {} files".format(len(processed_build_file))
processed_build_file.sort(
key=lambda current_child: current_child["duration"], reverse=True
)
# Only show the top ten buck files
if len(processed_build_file) > 10:
processed_build_file = processed_build_file[:10]
                extra_result += ", {} slowest BUCK files:\n".format(
len(processed_build_file)
)
else:
extra_result += "\n"
for info in processed_build_file:
extra_result += "Parsed {}: {:.2f} sec \n".format(
info["buildFile"], info["duration"]
)
extra_result += "\n\n"
profile_result = extra_result + profile_result
profile_result += Tracer.get_all_traces_and_reset()
java_process_send_result(to_parent, [], [], profile_result)
except Exception:
trace = traceback.format_exc()
print(str(trace))
raise
else:
java_process_send_result(to_parent, [], [], None)
def make_glob(pat):
# type: (str) -> str
if is_special(pat):
return pat
return pat + "/**"
# import autogenerated rule instances for effect.
try:
import generated_rules
except ImportError:
# If running directly or python tests of this code, this is not an error.
sys.stderr.write("Failed to load buck generated rules module.\n")
| glob |
profile.d.ts | export { profile as default } from "./"; |
||
log.ts | import * as fs from "fs";
import * as util from "util";
import { getArgs } from "./args";
const { consoleLogging, fileLogging } = getArgs();
const logFile = fileLogging ? fs.createWriteStream(fileLogging, { flags: "w" }) : null;
const stdout = process.stdout;
const stderr = process.stderr;
export function log(...args: unknown[]): void {
if (logFile || consoleLogging) {
const logText = formatLogText(args);
logFile?.write(logText);
if (consoleLogging) {
stdout.write(logText);
}
}
}
export function logDir(obj: unknown, options: object): void {
if (logFile || consoleLogging) {
const logText = util.inspect(obj, { showHidden: false, depth: 3, colors: false, ...options }) + "\n";
logFile?.write(logText);
if (consoleLogging) {
stdout.write(logText);
}
}
}
export function logTrace(message: string): void {
if (logFile || consoleLogging) {
const logText = formatLogText([message, new Error().stack]);
logFile?.write(logText);
if (consoleLogging) {
stdout.write(logText);
}
}
}
export function logError(...args: unknown[]): void {
if (logFile || consoleLogging) {
const logText = formatLogText(args);
logFile?.write(logText);
if (consoleLogging) {
stderr.write(logText);
}
}
}
function | (args: unknown[]) {
return util.format.apply(null, args) + "\n";
}
| formatLogText |
$css.js | /* Element css manipulation.
*
* Get the computed style properties for the first element in the set of matched elements.
*
* |Name |Type |Desc |
* |-------|--------------------|--------------------------|
* |element|string array element|Elements to manipulate |
* |name |string |Property name |
* |return |string |Css value of first element|
*
* Set one or more CSS properties for the set of matched elements.
*
* |Name |Type |Desc |
* |-------|--------------------|----------------------|
* |element|string array element|Elements to manipulate|
* |name |string |Property name |
* |value |string |Css value |
*
* |Name |Type |Desc |
* |----------|--------------------|--------------------------------|
* |element |string array element|Elements to manipulate |
* |properties|object |Object of css-value pairs to set|
*
* ```javascript
* $css('#test', {
* 'color': '#fff',
* 'background': 'black'
* });
* $css('#test', 'display', 'block');
* $css('#test', 'color'); // -> #fff
* ```
*/
/* module
* env: browser
* test: browser
*/
_('isStr isObj kebabCase isUndef contain isNum $safeEls prefix each');
function | (nodes, name, val) {
nodes = $safeEls(nodes);
var isGetter = isUndef(val) && isStr(name);
if (isGetter) return getCss(nodes[0], name);
var css = name;
if (!isObj(css)) {
css = {};
css[name] = val;
}
setCss(nodes, css);
}
function getCss(node, name) {
return (
node.style[prefix(name)] ||
getComputedStyle(node, '').getPropertyValue(name)
);
}
function setCss(nodes, css) {
each(nodes, function(node) {
var cssText = ';';
each(css, function(val, key) {
key = prefix.dash(key);
cssText += key + ':' + addPx(key, val) + ';';
});
node.style.cssText += cssText;
});
}
var cssNumProps = [
'column-count',
'columns',
'font-weight',
    'line-height',
'opacity',
'z-index',
'zoom'
];
function addPx(key, val) {
var needPx = isNum(val) && !contain(cssNumProps, kebabCase(key));
return needPx ? val + 'px' : val;
}
| exports |
_lib.py | ## -*- coding: utf-8 -*-
from .vendor.Qt import QtCore, QtGui, QtWidgets
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMayaUI as OpenMayaUI
import maya.OpenMaya as OpenMaya
import json
import os
def maya_version():
return int(cmds.about(v=True)[:4])
def maya_api_version():
return int(cmds.about(api=True))
if 2017 <= maya_version():
import shiboken2 as shiboken
else:
import shiboken
def get_anim_curve_editor():
return cmds.animCurveEditor('graphEditor1GraphEd', q=True, control=True)
def get_play_back_slider():
return mel.eval("$_=$gPlayBackSlider")
def | ():
_pbs = get_play_back_slider()
_c = OpenMayaUI.MQtUtil.findControl(_pbs)
w = shiboken.wrapInstance(long(_c), QtWidgets.QWidget)
return w
def get_anim_curve_editor_wiget():
_pbs = get_anim_curve_editor()
_c = OpenMayaUI.MQtUtil.findControl(_pbs)
if _c is None:
return None
w = shiboken.wrapInstance(long(_c), QtWidgets.QWidget)
return w.children()[1]
def get_timeline_highlight_range():
_pbs = get_play_back_slider()
_r = cmds.timeControl(_pbs, q=True, ra=True)
return _r[0], _r[1]
def get_timeline_renge():
r = cmds.timeControl(get_play_back_slider(), query=True, ra=True)
return [int(r[0]), int(r[1]) - 1]
def draw_data_to_multi_line_data(draw_data):
lines = []
for d in draw_data:
_dfr = d['fr']
_append = False
for line in lines:
_overlap = False
for l in line:
_lfr = l['fr']
                # The frames being added overlap this existing entry's frame range
if _lfr[0] <= _dfr[0] <= _lfr[1] or _lfr[0] <= _dfr[1] <= _lfr[1]:
_overlap = True
break
                # The frame range being added completely encloses this existing entry's range
if _dfr[0] <= _lfr[0] <= _dfr[1] and _dfr[0] <= _lfr[1] <= _dfr[1]:
_overlap = True
break
if not _overlap:
line.append(d)
_append = True
break
        # Add a new row
if not _append:
lines.append([d])
return lines
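# A minimal sketch (not part of the original module) of how
# draw_data_to_multi_line_data packs non-overlapping frame ranges onto the
# same display row; the frame ranges below are arbitrary examples.
def _example_multi_line_layout():
    data = [{'fr': (0, 10)}, {'fr': (5, 12)}, {'fr': (20, 30)}]
    lines = draw_data_to_multi_line_data(data)
    # (0, 10) and (20, 30) do not overlap, so they share the first row,
    # while (5, 12) overlaps (0, 10) and is pushed onto a second row.
    assert len(lines) == 2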
#-----------------------------------------------------------------------------
# EOF
#-----------------------------------------------------------------------------
| get_timeline_wiget |
masking.go | package buildtest
import (
"math"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/trace"
)
func RunBuildWithMasking(t *testing.T, config *common.RunnerConfig, setup buildSetupFn) | {
resp, err := common.GetRemoteSuccessfulBuildWithEnvs(config.Shell, false)
require.NoError(t, err)
build := &common.Build{
JobResponse: resp,
Runner: config,
}
build.Variables = append(
build.Variables,
common.JobVariable{Key: "MASKED_KEY", Value: "MASKED_VALUE", Masked: true},
common.JobVariable{Key: "CLEARTEXT_KEY", Value: "CLEARTEXT_VALUE", Masked: false},
)
if setup != nil {
setup(build)
}
buf, err := trace.New()
require.NoError(t, err)
defer buf.Close()
err = build.Run(&common.Config{}, &common.Trace{Writer: buf})
assert.NoError(t, err)
buf.Finish()
contents, err := buf.Bytes(0, math.MaxInt64)
assert.NoError(t, err)
assert.NotContains(t, string(contents), "MASKED_KEY=MASKED_VALUE")
assert.Contains(t, string(contents), "MASKED_KEY=[MASKED]")
assert.NotContains(t, string(contents), "CLEARTEXT_KEY=[MASKED]")
assert.Contains(t, string(contents), "CLEARTEXT_KEY=CLEARTEXT_VALUE")
} |
|
xor.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import onnx
from ..base import Base
from . import expect
class Xor(Base):
@staticmethod
def export():
|
@staticmethod
def export_xor_broadcast():
node = onnx.helper.make_node(
'Xor',
inputs=['x', 'y'],
outputs=['xor'],
broadcast=1,
)
#3d vs 1d
x = (np.random.randn(3, 4, 5) > 0).astype(np.bool)
y = (np.random.randn(5) > 0).astype(np.bool)
z = np.logical_xor(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_xor_bcast3v1d')
#3d vs 2d
x = (np.random.randn(3, 4, 5) > 0).astype(np.bool)
y = (np.random.randn(4, 5) > 0).astype(np.bool)
z = np.logical_xor(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_xor_bcast3v2d')
#4d vs 2d
x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)
y = (np.random.randn(5, 6) > 0).astype(np.bool)
z = np.logical_xor(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_xor_bcast4v2d')
#4d vs 3d
x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)
y = (np.random.randn(4, 5, 6) > 0).astype(np.bool)
z = np.logical_xor(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_xor_bcast4v3d')
@staticmethod
def export_xor_axis():
x = (np.random.randn(5, 5, 5, 5) > 0).astype(np.bool)
y = (np.random.randn(5) > 0).astype(np.bool)
node = onnx.helper.make_node(
'Xor',
inputs=['x', 'y'],
outputs=['xor'],
broadcast=1,
axis=0,
)
z = np.logical_xor(x, y[:, np.newaxis, np.newaxis, np.newaxis])
expect(node, inputs=[x, y], outputs=[z],
name='test_xor_axis0')
node = onnx.helper.make_node(
'Xor',
inputs=['x', 'y'],
outputs=['xor'],
broadcast=1,
axis=1,
)
z = np.logical_xor(x, y[:, np.newaxis, np.newaxis,])
expect(node, inputs=[x, y], outputs=[z],
name='test_xor_axis1')
node = onnx.helper.make_node(
'Xor',
inputs=['x', 'y'],
outputs=['xor'],
broadcast=1,
axis=2,
)
z = np.logical_xor(x, y[:, np.newaxis,])
expect(node, inputs=[x, y], outputs=[z],
name='test_xor_axis2')
node = onnx.helper.make_node(
'Xor',
inputs=['x', 'y'],
outputs=['xor'],
broadcast=1,
axis=3,
)
z = np.logical_xor(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_xor_axis3') | node = onnx.helper.make_node(
'Xor',
inputs=['x', 'y'],
outputs=['xor'],
)
# 2d
x = (np.random.randn(3, 4) > 0).astype(np.bool)
y = (np.random.randn(3, 4) > 0).astype(np.bool)
z = np.logical_xor(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_xor2d')
# 3d
x = (np.random.randn(3, 4, 5) > 0).astype(np.bool)
y = (np.random.randn(3, 4, 5) > 0).astype(np.bool)
z = np.logical_xor(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_xor3d')
# 4d
x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)
y = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)
z = np.logical_xor(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_xor4d') |
use_context.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"errors"
"fmt"
"io"
"github.com/spf13/cobra"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
)
type useContextOptions struct {
configAccess clientcmd.ConfigAccess
contextName string
}
func NewCmdConfigUseContext(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command {
options := &useContextOptions{configAccess: configAccess}
cmd := &cobra.Command{
Use: "use-context CONTEXT_NAME",
Short: "Sets the current-context in a kubeconfig file",
Long: `Sets the current-context in a kubeconfig file`,
Run: func(cmd *cobra.Command, args []string) {
if !options.complete(cmd) {
return
}
err := options.run()
if err != nil {
fmt.Fprintf(out, "%v\n", err)
} else {
fmt.Fprintf(out, "switched to context %q.\n", options.contextName)
}
},
}
return cmd
}
func (o useContextOptions) run() error {
config, err := o.configAccess.GetStartingConfig()
if err != nil {
return err
}
err = o.validate(config)
if err != nil {
return err
}
config.CurrentContext = o.contextName
if err := clientcmd.ModifyConfig(o.configAccess, *config, true); err != nil {
return err
}
return nil
}
func (o *useContextOptions) complete(cmd *cobra.Command) bool {
endingArgs := cmd.Flags().Args()
if len(endingArgs) != 1 {
cmd.Help()
return false
}
o.contextName = endingArgs[0]
return true
}
func (o useContextOptions) validate(config *clientcmdapi.Config) error {
if len(o.contextName) == 0 { | for name := range config.Contexts {
if name == o.contextName {
return nil
}
}
return fmt.Errorf("no context exists with the name: %q.", o.contextName)
} | return errors.New("you must specify a current-context")
}
|
tokens.go | package irc
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
)
// CasemapASCII of name is the canonical representation of name according to the
// ascii casemapping.
func CasemapASCII(name string) string {
var sb strings.Builder
sb.Grow(len(name))
for _, r := range name {
if 'A' <= r && r <= 'Z' {
r += 'a' - 'A'
}
sb.WriteRune(r)
}
return sb.String()
}
// CasemapRFC1459 of name is the canonical representation of name according to the
// rfc-1459 casemapping.
func CasemapRFC1459(name string) string {
var sb strings.Builder
sb.Grow(len(name))
for _, r := range name {
if 'A' <= r && r <= 'Z' {
r += 'a' - 'A'
} else if r == '[' {
r = '{'
} else if r == ']' {
r = '}'
} else if r == '\\' {
r = '|'
} else if r == '~' {
r = '^'
}
sb.WriteRune(r)
}
return sb.String()
}
// word returns the first word of s and the rest of s.
func word(s string) (word, rest string) {
split := strings.SplitN(s, " ", 2)
if len(split) < 2 {
word = split[0]
rest = ""
} else {
word = split[0]
rest = split[1]
}
return
}
// tagEscape returns the value of '\c' given c according to the message-tags
// specification.
func | (c rune) (escape rune) {
switch c {
case ':':
escape = ';'
case 's':
escape = ' '
case 'r':
escape = '\r'
case 'n':
escape = '\n'
default:
escape = c
}
return
}
// unescapeTagValue removes escapes from the given string and replaces them with
// their meaningful values.
func unescapeTagValue(escaped string) string {
var builder strings.Builder
builder.Grow(len(escaped))
escape := false
for _, c := range escaped {
if c == '\\' && !escape {
escape = true
} else {
var cpp rune
if escape {
cpp = tagEscape(c)
} else {
cpp = c
}
builder.WriteRune(cpp)
escape = false
}
}
return builder.String()
}
// escapeTagValue does the inverse operation of unescapeTagValue.
func escapeTagValue(unescaped string) string {
var sb strings.Builder
sb.Grow(len(unescaped) * 2)
for _, c := range unescaped {
switch c {
case ';':
sb.WriteRune('\\')
sb.WriteRune(':')
case ' ':
sb.WriteRune('\\')
sb.WriteRune('s')
case '\r':
sb.WriteRune('\\')
sb.WriteRune('r')
case '\n':
sb.WriteRune('\\')
sb.WriteRune('n')
case '\\':
sb.WriteRune('\\')
sb.WriteRune('\\')
default:
sb.WriteRune(c)
}
}
return sb.String()
}
func parseTags(s string) (tags map[string]string) {
s = s[1:]
tags = map[string]string{}
for _, item := range strings.Split(s, ";") {
if item == "" || item == "=" || item == "+" || item == "+=" {
continue
}
kv := strings.SplitN(item, "=", 2)
if len(kv) < 2 {
tags[kv[0]] = ""
} else {
tags[kv[0]] = unescapeTagValue(kv[1])
}
}
return
}
var (
errEmptyMessage = errors.New("empty message")
errIncompleteMessage = errors.New("message is incomplete")
)
type Prefix struct {
Name string
User string
Host string
}
// ParsePrefix parses a "nick!user@host" combination (or a prefix) from the given
// string.
func ParsePrefix(s string) (p *Prefix) {
if s == "" {
return
}
p = &Prefix{}
spl0 := strings.Split(s, "@")
if 1 < len(spl0) {
p.Host = spl0[1]
}
spl1 := strings.Split(spl0[0], "!")
if 1 < len(spl1) {
p.User = spl1[1]
}
p.Name = spl1[0]
return
}
// Copy makes a copy of the prefix, but doesn't copy the internal strings.
func (p *Prefix) Copy() *Prefix {
if p == nil {
return nil
}
res := &Prefix{}
*res = *p
return res
}
// String returns the "nick!user@host" representation of the prefix.
func (p *Prefix) String() string {
if p == nil {
return ""
}
if p.User != "" && p.Host != "" {
return p.Name + "!" + p.User + "@" + p.Host
} else if p.User != "" {
return p.Name + "!" + p.User
} else if p.Host != "" {
return p.Name + "@" + p.Host
} else {
return p.Name
}
}
// Message is the representation of an IRC message.
type Message struct {
Tags map[string]string
Prefix *Prefix
Command string
Params []string
}
func NewMessage(command string, params ...string) Message {
return Message{Command: command, Params: params}
}
// ParseMessage parses the message from the given string, which must be trimmed
// of "\r\n" beforehand.
func ParseMessage(line string) (msg Message, err error) {
line = strings.TrimLeft(line, " ")
if line == "" {
err = errEmptyMessage
return
}
if line[0] == '@' {
var tags string
tags, line = word(line)
msg.Tags = parseTags(tags)
}
line = strings.TrimLeft(line, " ")
if line == "" {
err = errIncompleteMessage
return
}
if line[0] == ':' {
var prefix string
prefix, line = word(line)
msg.Prefix = ParsePrefix(prefix[1:])
}
line = strings.TrimLeft(line, " ")
if line == "" {
err = errIncompleteMessage
return
}
msg.Command, line = word(line)
msg.Command = strings.ToUpper(msg.Command)
msg.Params = make([]string, 0, 15)
for line != "" {
if line[0] == ':' {
msg.Params = append(msg.Params, line[1:])
break
}
var param string
param, line = word(line)
msg.Params = append(msg.Params, param)
}
return
}
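// Minimal usage sketch, added for illustration and not part of the original
// file: parse a tagged PRIVMSG and note which fields end up populated.
func exampleParseMessage() {
	msg, err := ParseMessage("@time=2021-06-01T12:00:00.000Z :nick!user@host PRIVMSG #chan :hello there")
	if err != nil {
		return
	}
	_ = msg.Command      // "PRIVMSG"
	_ = msg.Params       // []string{"#chan", "hello there"}
	_ = msg.Prefix.User  // "user" (Name is "nick", Host is "host")
	_ = msg.Tags["time"] // "2021-06-01T12:00:00.000Z"
}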
func (msg Message) WithTag(key, value string) Message {
if msg.Tags == nil {
msg.Tags = map[string]string{}
}
msg.Tags[key] = escapeTagValue(value)
return msg
}
// IsReply reports whether the message command is a server reply.
func (msg *Message) IsReply() bool {
if len(msg.Command) != 3 {
return false
}
for _, r := range msg.Command {
if !('0' <= r && r <= '9') {
return false
}
}
return true
}
// String returns the protocol representation of the message, without an ending
// "\r\n".
func (msg *Message) String() string {
var sb strings.Builder
if msg.Tags != nil {
sb.WriteRune('@')
for k, v := range msg.Tags {
sb.WriteString(k)
if v != "" {
sb.WriteRune('=')
sb.WriteString(escapeTagValue(v))
}
sb.WriteRune(';')
}
sb.WriteRune(' ')
}
if msg.Prefix != nil {
sb.WriteRune(':')
sb.WriteString(msg.Prefix.String())
sb.WriteRune(' ')
}
sb.WriteString(msg.Command)
if len(msg.Params) != 0 {
for _, p := range msg.Params[:len(msg.Params)-1] {
sb.WriteRune(' ')
sb.WriteString(p)
}
lastParam := msg.Params[len(msg.Params)-1]
if !strings.ContainsRune(lastParam, ' ') && !strings.HasPrefix(lastParam, ":") {
sb.WriteRune(' ')
sb.WriteString(lastParam)
} else {
sb.WriteRune(' ')
sb.WriteRune(':')
sb.WriteString(lastParam)
}
}
return sb.String()
}
// IsValid reports whether the message is correctly formed.
func (msg *Message) IsValid() bool {
switch msg.Command {
case "AUTHENTICATE", "PING", "PONG":
return 1 <= len(msg.Params)
case rplEndofnames, rplLoggedout, rplMotd, errNicknameinuse, rplNotopic, rplWelcome, rplYourhost:
return 2 <= len(msg.Params)
case rplIsupport, rplLoggedin, rplTopic, "FAIL", "WARN", "NOTE":
return 3 <= len(msg.Params)
case rplNamreply:
return 4 <= len(msg.Params)
case rplWhoreply:
return 8 <= len(msg.Params)
case "JOIN", "NICK", "PART", "TAGMSG":
return 1 <= len(msg.Params) && msg.Prefix != nil
case "KICK", "PRIVMSG", "NOTICE", "TOPIC":
return 2 <= len(msg.Params) && msg.Prefix != nil
case "QUIT":
return msg.Prefix != nil
case "CAP":
return 3 <= len(msg.Params) &&
(msg.Params[1] == "LS" ||
msg.Params[1] == "LIST" ||
msg.Params[1] == "ACK" ||
msg.Params[1] == "NAK" ||
msg.Params[1] == "NEW" ||
msg.Params[1] == "DEL")
case rplTopicwhotime:
if len(msg.Params) < 4 {
return false
}
_, err := strconv.ParseInt(msg.Params[3], 10, 64)
return err == nil
case "BATCH":
if len(msg.Params) < 1 {
return false
}
if len(msg.Params[0]) < 2 {
return false
}
if msg.Params[0][0] == '+' {
if len(msg.Params) < 2 {
return false
}
switch msg.Params[1] {
case "chathistory":
return 3 <= len(msg.Params)
default:
return false
}
}
return msg.Params[0][0] == '-'
default:
if len(msg.Command) != 3 || len(msg.Params) < 2 {
return false
}
_, err := strconv.Atoi(msg.Command)
return err == nil
}
}
// Time returns the time at which the message was sent, if present.
func (msg *Message) Time() (t time.Time, ok bool) {
var tag string
var year, month, day, hour, minute, second, millis int
tag, ok = msg.Tags["time"]
if !ok {
return
}
tag = strings.TrimSuffix(tag, "Z")
_, err := fmt.Sscanf(tag, "%4d-%2d-%2dT%2d:%2d:%2d.%3d", &year, &month, &day, &hour, &minute, &second, &millis)
if err != nil || month < 1 || 12 < month {
ok = false
return
}
t = time.Date(year, time.Month(month), day, hour, minute, second, millis*1e6, time.UTC)
return
}
// TimeOrNow returns the time at which the message was sent, or time.Now() if
// it is absent.
func (msg *Message) TimeOrNow() time.Time {
t, ok := msg.Time()
if ok {
return t
}
return time.Now().UTC()
}
// Severity is the severity of a server reply.
type Severity int
const (
SeverityNote Severity = iota
SeverityWarn
SeverityFail
)
// ReplySeverity returns the severity of a server reply.
func ReplySeverity(reply string) Severity {
switch reply[0] {
case '4', '5':
if reply == "422" {
return SeverityNote
} else {
return SeverityFail
}
case '9':
switch reply[2] {
case '2', '4', '5', '6', '7':
return SeverityFail
default:
return SeverityNote
}
default:
return SeverityNote
}
}
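// Illustrative check for ReplySeverity (added for clarity; not part of the
// original file). The helper name exampleReplySeverity is hypothetical.
func exampleReplySeverity() bool {
	return ReplySeverity("433") == SeverityFail && // e.g. nickname in use
		ReplySeverity("422") == SeverityNote && // missing MOTD is informational only
		ReplySeverity("904") == SeverityFail && // 9xx reply with '4' as the third digit
		ReplySeverity("001") == SeverityNote // welcome
}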
// Cap is a capability token in "CAP" server responses.
type Cap struct {
Name string
Value string
Enable bool
}
// ParseCaps parses the last argument (capability list) of "CAP LS/LIST/NEW/DEL"
// server responses.
func ParseCaps(caps string) (diff []Cap) {
for _, c := range strings.Split(caps, " ") {
if c == "" || c == "-" || c == "=" || c == "-=" {
continue
}
var item Cap
if strings.HasPrefix(c, "-") {
item.Enable = false
c = c[1:]
} else {
item.Enable = true
}
kv := strings.SplitN(c, "=", 2)
item.Name = strings.ToLower(kv[0])
if len(kv) > 1 {
item.Value = kv[1]
}
diff = append(diff, item)
}
return
}
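// Hedged usage sketch (added for illustration; not part of the original file):
// ParseCaps accepts both plain listings ("CAP LS") and "-"-prefixed removals
// ("CAP DEL"). The helper name exampleParseCaps is hypothetical.
func exampleParseCaps() {
	caps := ParseCaps("sasl=PLAIN,EXTERNAL -away-notify multi-prefix")
	_ = caps[0] // Cap{Name: "sasl", Value: "PLAIN,EXTERNAL", Enable: true}
	_ = caps[1] // Cap{Name: "away-notify", Enable: false}
	_ = caps[2] // Cap{Name: "multi-prefix", Enable: true}
}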
// Member is a token in RPL_NAMREPLY's last parameter.
type Member struct {
PowerLevel string
Name *Prefix
}
type members []Member
func (m members) Len() int {
return len(m)
}
func (m members) Less(i, j int) bool {
return strings.ToLower(m[i].Name.Name) < strings.ToLower(m[j].Name.Name)
}
func (m members) Swap(i, j int) {
m[i], m[j] = m[j], m[i]
}
// ParseNameReply parses the last parameter of RPL_NAMREPLY, according to the
// membership prefixes of the server.
func ParseNameReply(trailing string, prefixes string) (names []Member) {
for _, word := range strings.Split(trailing, " ") {
if word == "" {
continue
}
name := strings.TrimLeft(word, prefixes)
names = append(names, Member{
PowerLevel: word[:len(word)-len(name)],
Name: ParsePrefix(name),
})
}
return
}
| tagEscape |
fs.go | // Copyright (c) 2021 roc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package fs
import (
"errors"
"os"
"path"
"strings"
)
func open(name string, perm os.FileMode) (*os.File, error) {
return os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_SYNC, perm)
}
func isLink(filename string) (string, error) {
fi, err := os.Lstat(filename)
if err != nil {
return "", err
}
if fi.Mode()&os.ModeSymlink != 0 {
name, err := os.Readlink(filename)
if err != nil {
return "", err
}
return name, nil
}
return "", errors.New("not symlink")
}
func pathIsExist(path string) bool |
func getFilenamePrefix(s string) string {
return strings.TrimSuffix(path.Base(s), ".log")
}
| {
_, err := os.Stat(path)
if err != nil {
return os.IsExist(err)
}
return true
} |
server.ts | /// <reference types="node" />
/// <reference path="shared.ts" />
/// <reference path="session.ts" />
namespace ts.server {
interface IOSessionOptions {
host: ServerHost;
cancellationToken: ServerCancellationToken;
canUseEvents: boolean;
installerEventPort: number;
useSingleInferredProject: boolean;
useInferredProjectPerProjectRoot: boolean;
disableAutomaticTypingAcquisition: boolean;
globalTypingsCacheLocation: string;
logger: Logger;
typingSafeListLocation: string;
typesMapLocation: string | undefined;
npmLocation: string | undefined;
telemetryEnabled: boolean;
globalPlugins: ReadonlyArray<string>;
pluginProbeLocations: ReadonlyArray<string>;
allowLocalPluginLoads: boolean;
}
const net: {
connect(options: { port: number }, onConnect?: () => void): NodeSocket
} = require("net");
const childProcess: {
fork(modulePath: string, args: string[], options?: { execArgv: string[], env?: MapLike<string> }): NodeChildProcess;
execFileSync(file: string, args: string[], options: { stdio: "ignore", env: MapLike<string> }): string | Buffer;
} = require("child_process");
const os: {
homedir?(): string;
tmpdir(): string;
} = require("os");
function getGlobalTypingsCacheLocation() {
switch (process.platform) {
case "win32": {
const basePath = process.env.LOCALAPPDATA ||
process.env.APPDATA ||
(os.homedir && os.homedir()) ||
process.env.USERPROFILE ||
(process.env.HOMEDRIVE && process.env.HOMEPATH && normalizeSlashes(process.env.HOMEDRIVE + process.env.HOMEPATH)) ||
os.tmpdir();
return combinePaths(combinePaths(normalizeSlashes(basePath), "Microsoft/TypeScript"), versionMajorMinor);
}
case "openbsd":
case "freebsd":
case "darwin":
case "linux":
case "android": {
const cacheLocation = getNonWindowsCacheLocation(process.platform === "darwin");
return combinePaths(combinePaths(cacheLocation, "typescript"), versionMajorMinor);
}
default:
Debug.fail(`unsupported platform '${process.platform}'`);
return;
}
}
function getNonWindowsCacheLocation(platformIsDarwin: boolean) {
if (process.env.XDG_CACHE_HOME) {
return process.env.XDG_CACHE_HOME;
}
const usersDir = platformIsDarwin ? "Users" : "home";
const homePath = (os.homedir && os.homedir()) ||
process.env.HOME ||
((process.env.LOGNAME || process.env.USER) && `/${usersDir}/${process.env.LOGNAME || process.env.USER}`) ||
os.tmpdir();
const cacheFolder = platformIsDarwin
? "Library/Caches"
: ".cache";
return combinePaths(normalizeSlashes(homePath), cacheFolder);
}
interface NodeChildProcess {
send(message: any, sendHandle?: any): void;
on(message: "message" | "exit", f: (m: any) => void): void;
kill(): void;
pid: number;
}
interface NodeSocket {
write(data: string, encoding: string): boolean;
}
interface ReadLineOptions {
input: NodeJS.ReadableStream;
output?: NodeJS.WritableStream;
terminal?: boolean;
historySize?: number;
}
interface Stats {
isFile(): boolean;
isDirectory(): boolean;
isBlockDevice(): boolean;
isCharacterDevice(): boolean;
isSymbolicLink(): boolean;
isFIFO(): boolean;
isSocket(): boolean;
dev: number;
ino: number;
mode: number;
nlink: number;
uid: number;
gid: number;
rdev: number;
size: number;
blksize: number;
blocks: number;
atime: Date;
mtime: Date;
ctime: Date;
birthtime: Date;
}
const readline: {
createInterface(options: ReadLineOptions): NodeJS.EventEmitter;
} = require("readline");
const fs: {
openSync(path: string, options: string): number;
close(fd: number): void;
writeSync(fd: number, buffer: Buffer, offset: number, length: number, position?: number): number;
writeSync(fd: number, data: any, position?: number, encoding?: string): number;
statSync(path: string): Stats;
stat(path: string, callback?: (err: NodeJS.ErrnoException, stats: Stats) => any): void;
} = require("fs");
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout,
terminal: false,
});
class Logger implements server.Logger {
private fd = -1;
private seq = 0;
private inGroup = false;
private firstInGroup = true;
constructor(private readonly logFilename: string,
private readonly traceToConsole: boolean,
private readonly level: LogLevel) {
if (this.logFilename) {
try {
this.fd = fs.openSync(this.logFilename, "w");
}
catch (_) {
// swallow the error and keep logging disabled if file cannot be opened
}
}
}
static padStringRight(str: string, padding: string) {
return (str + padding).slice(0, padding.length);
}
close() {
if (this.fd >= 0) {
fs.close(this.fd);
}
}
getLogFileName() {
return this.logFilename;
}
perftrc(s: string) {
this.msg(s, Msg.Perf);
}
info(s: string) {
this.msg(s, Msg.Info);
}
err(s: string) {
this.msg(s, Msg.Err);
}
startGroup() {
this.inGroup = true;
this.firstInGroup = true;
}
endGroup() {
this.inGroup = false;
}
loggingEnabled() {
return !!this.logFilename || this.traceToConsole;
}
hasLevel(level: LogLevel) {
return this.loggingEnabled() && this.level >= level;
}
msg(s: string, type: Msg.Types = Msg.Err) {
if (!this.canWrite) return;
s = `[${nowString()}] ${s}\n`;
if (!this.inGroup || this.firstInGroup) {
const prefix = Logger.padStringRight(type + " " + this.seq.toString(), " ");
s = prefix + s;
}
this.write(s);
if (!this.inGroup) {
this.seq++;
}
}
private get canWrite() {
return this.fd >= 0 || this.traceToConsole;
}
private write(s: string) {
if (this.fd >= 0) {
const buf = new Buffer(s);
// tslint:disable-next-line no-null-keyword
fs.writeSync(this.fd, buf, 0, buf.length, /*position*/ null);
}
if (this.traceToConsole) {
console.warn(s);
}
}
}
// E.g. "12:34:56.789" (the components are not zero-padded, so "9:5:7.42" is also possible)
function nowString() {
const d = new Date();
return `${d.getHours()}:${d.getMinutes()}:${d.getSeconds()}.${d.getMilliseconds()}`;
}
class NodeTypingsInstaller implements ITypingsInstaller {
private installer: NodeChildProcess;
private installerPidReported = false;
private socket: NodeSocket;
private projectService: ProjectService;
private throttledOperations: ThrottledOperations;
private eventSender: EventSender;
constructor(
private readonly telemetryEnabled: boolean,
private readonly logger: server.Logger,
host: ServerHost,
eventPort: number,
readonly globalTypingsCacheLocation: string,
readonly typingSafeListLocation: string,
readonly typesMapLocation: string,
private readonly npmLocation: string | undefined,
private newLine: string) {
this.throttledOperations = new ThrottledOperations(host);
if (eventPort) {
const s = net.connect({ port: eventPort }, () => {
this.socket = s;
this.reportInstallerProcessId();
});
}
}
private reportInstallerProcessId() {
if (this.installerPidReported) {
return;
}
if (this.socket && this.installer) {
this.sendEvent(0, "typingsInstallerPid", { pid: this.installer.pid });
this.installerPidReported = true;
}
}
private sendEvent(seq: number, event: string, body: any): void {
this.socket.write(formatMessage({ seq, type: "event", event, body }, this.logger, Buffer.byteLength, this.newLine), "utf8");
}
setTelemetrySender(telemetrySender: EventSender) {
this.eventSender = telemetrySender;
}
attach(projectService: ProjectService) {
this.projectService = projectService;
if (this.logger.hasLevel(LogLevel.requestTime)) {
this.logger.info("Binding...");
}
const args: string[] = [Arguments.GlobalCacheLocation, this.globalTypingsCacheLocation];
if (this.telemetryEnabled) {
args.push(Arguments.EnableTelemetry);
}
if (this.logger.loggingEnabled() && this.logger.getLogFileName()) {
args.push(Arguments.LogFile, combinePaths(getDirectoryPath(normalizeSlashes(this.logger.getLogFileName())), `ti-${process.pid}.log`));
}
if (this.typingSafeListLocation) {
args.push(Arguments.TypingSafeListLocation, this.typingSafeListLocation);
}
if (this.typesMapLocation) {
args.push(Arguments.TypesMapLocation, this.typesMapLocation);
}
if (this.npmLocation) {
args.push(Arguments.NpmLocation, this.npmLocation);
}
const execArgv: string[] = [];
for (const arg of process.execArgv) {
const match = /^--(debug|inspect)(=(\d+))?$/.exec(arg);
if (match) {
// If a port is specified, use port + 1.
// Otherwise pick the default port for 'debug' (5858) or 'inspect' (9229) and use that value + 1.
const currentPort = match[3] !== undefined
? +match[3]
: match[1] === "debug" ? 5858 : 9229;
execArgv.push(`--${match[1]}=${currentPort + 1}`);
break;
}
}
this.installer = childProcess.fork(combinePaths(__dirname, "typingsInstaller.js"), args, { execArgv });
this.installer.on("message", m => this.handleMessage(m));
this.reportInstallerProcessId();
process.on("exit", () => {
this.installer.kill();
});
}
onProjectClosed(p: Project): void {
this.installer.send({ projectName: p.getProjectName(), kind: "closeProject" });
}
enqueueInstallTypingsRequest(project: Project, typeAcquisition: TypeAcquisition, unresolvedImports: SortedReadonlyArray<string>): void {
const request = createInstallTypingsRequest(project, typeAcquisition, unresolvedImports);
if (this.logger.hasLevel(LogLevel.verbose)) {
    this.logger.info(`Scheduling throttled operation: ${JSON.stringify(request)}`);
}
this.throttledOperations.schedule(project.getProjectName(), /*ms*/ 250, () => {
if (this.logger.hasLevel(LogLevel.verbose)) {
this.logger.info(`Sending request: ${JSON.stringify(request)}`);
}
this.installer.send(request);
});
}
private handleMessage(response: SetTypings | InvalidateCachedTypings | BeginInstallTypes | EndInstallTypes | InitializationFailedResponse) {
if (this.logger.hasLevel(LogLevel.verbose)) {
this.logger.info(`Received response: ${JSON.stringify(response)}`);
}
if (response.kind === EventInitializationFailed) {
if (!this.eventSender) {
return;
}
const body: protocol.TypesInstallerInitializationFailedEventBody = {
message: response.message
};
const eventName: protocol.TypesInstallerInitializationFailedEventName = "typesInstallerInitializationFailed";
this.eventSender.event(body, eventName);
return;
}
if (response.kind === EventBeginInstallTypes) {
if (!this.eventSender) {
return;
}
const body: protocol.BeginInstallTypesEventBody = {
eventId: response.eventId,
packages: response.packagesToInstall,
};
const eventName: protocol.BeginInstallTypesEventName = "beginInstallTypes";
this.eventSender.event(body, eventName);
return;
}
if (response.kind === EventEndInstallTypes) {
if (!this.eventSender) {
return;
}
if (this.telemetryEnabled) {
const body: protocol.TypingsInstalledTelemetryEventBody = {
telemetryEventName: "typingsInstalled",
payload: {
installedPackages: response.packagesToInstall.join(","),
installSuccess: response.installSuccess,
typingsInstallerVersion: response.typingsInstallerVersion
}
};
const eventName: protocol.TelemetryEventName = "telemetry";
this.eventSender.event(body, eventName);
}
const body: protocol.EndInstallTypesEventBody = {
eventId: response.eventId,
packages: response.packagesToInstall,
success: response.installSuccess,
};
const eventName: protocol.EndInstallTypesEventName = "endInstallTypes";
this.eventSender.event(body, eventName);
return;
}
this.projectService.updateTypingsForProject(response);
if (response.kind === ActionSet && this.socket) {
this.sendEvent(0, "setTypings", response);
}
}
}
class IOSession extends Session {
constructor(options: IOSessionOptions) {
const { host, installerEventPort, globalTypingsCacheLocation, typingSafeListLocation, typesMapLocation, npmLocation, canUseEvents } = options;
const typingsInstaller = disableAutomaticTypingAcquisition
? undefined
: new NodeTypingsInstaller(telemetryEnabled, logger, host, installerEventPort, globalTypingsCacheLocation, typingSafeListLocation, typesMapLocation, npmLocation, host.newLine);
super({
host,
cancellationToken,
useSingleInferredProject,
useInferredProjectPerProjectRoot,
typingsInstaller: typingsInstaller || nullTypingsInstaller,
byteLength: Buffer.byteLength,
hrtime: process.hrtime,
logger,
canUseEvents,
globalPlugins: options.globalPlugins,
pluginProbeLocations: options.pluginProbeLocations,
allowLocalPluginLoads: options.allowLocalPluginLoads });
if (telemetryEnabled && typingsInstaller) {
typingsInstaller.setTelemetrySender(this);
}
}
exit() {
this.logger.info("Exiting...");
this.projectService.closeLog();
process.exit(0);
}
listen() {
rl.on("line", (input: string) => {
const message = input.trim();
this.onMessage(message);
});
rl.on("close", () => {
this.exit();
});
}
}
interface LogOptions {
file?: string;
detailLevel?: LogLevel;
traceToConsole?: boolean;
logToFile?: boolean;
}
function | (logEnvStr: string): LogOptions {
if (!logEnvStr) {
return {};
}
const logEnv: LogOptions = { logToFile: true };
const args = logEnvStr.split(" ");
const len = args.length - 1;
for (let i = 0; i < len; i += 2) {
const option = args[i];
const value = args[i + 1];
if (option && value) {
switch (option) {
case "-file":
logEnv.file = stripQuotes(value);
break;
case "-level":
const level = getLogLevel(value);
logEnv.detailLevel = level !== undefined ? level : LogLevel.normal;
break;
case "-traceToConsole":
logEnv.traceToConsole = value.toLowerCase() === "true";
break;
case "-logToFile":
logEnv.logToFile = value.toLowerCase() === "true";
break;
}
}
}
return logEnv;
}
function getLogLevel(level: string) {
if (level) {
const l = level.toLowerCase();
for (const name in LogLevel) {
if (isNaN(+name) && l === name.toLowerCase()) {
return <LogLevel><any>LogLevel[name];
}
}
}
return undefined;
}
// TSS_LOG "{ level: "normal | verbose | terse", file?: string}"
function createLogger() {
const cmdLineLogFileName = findArgument("--logFile");
const cmdLineVerbosity = getLogLevel(findArgument("--logVerbosity"));
const envLogOptions = parseLoggingEnvironmentString(process.env["TSS_LOG"]);
const logFileName = cmdLineLogFileName
? stripQuotes(cmdLineLogFileName)
: envLogOptions.logToFile
? envLogOptions.file || (__dirname + "/.log" + process.pid.toString())
: undefined;
const logVerbosity = cmdLineVerbosity || envLogOptions.detailLevel;
return new Logger(logFileName, envLogOptions.traceToConsole, logVerbosity);
}
// This places log file in the directory containing editorServices.js
// TODO: check that this location is writable
// average async stat takes about 30 microseconds
// set chunk size to do 30 files in < 1 millisecond
function createPollingWatchedFileSet(interval = 2500, chunkSize = 30) {
const watchedFiles: WatchedFile[] = [];
let nextFileToCheck = 0;
let watchTimer: any;
return { getModifiedTime, poll, startWatchTimer, addFile, removeFile };
function getModifiedTime(fileName: string): Date {
return fs.statSync(fileName).mtime;
}
function poll(checkedIndex: number) {
const watchedFile = watchedFiles[checkedIndex];
if (!watchedFile) {
return;
}
fs.stat(watchedFile.fileName, (err: any, stats: any) => {
if (err) {
watchedFile.callback(watchedFile.fileName, FileWatcherEventKind.Changed);
}
else {
const oldTime = watchedFile.mtime.getTime();
const newTime = stats.mtime.getTime();
if (oldTime !== newTime) {
watchedFile.mtime = stats.mtime;
const eventKind = oldTime === 0
? FileWatcherEventKind.Created
: newTime === 0
? FileWatcherEventKind.Deleted
: FileWatcherEventKind.Changed;
watchedFile.callback(watchedFile.fileName, eventKind);
}
}
});
}
// this implementation uses polling and
// stat due to inconsistencies of fs.watch
// and efficiency of stat on modern filesystems
function startWatchTimer() {
watchTimer = setInterval(() => {
let count = 0;
let nextToCheck = nextFileToCheck;
let firstCheck = -1;
while ((count < chunkSize) && (nextToCheck !== firstCheck)) {
poll(nextToCheck);
if (firstCheck < 0) {
firstCheck = nextToCheck;
}
nextToCheck++;
if (nextToCheck === watchedFiles.length) {
nextToCheck = 0;
}
count++;
}
nextFileToCheck = nextToCheck;
}, interval);
}
function addFile(fileName: string, callback: FileWatcherCallback): WatchedFile {
const file: WatchedFile = {
fileName,
callback,
mtime: sys.fileExists(fileName)
? getModifiedTime(fileName)
: new Date(0) // Any subsequent modification will occur after this time
};
watchedFiles.push(file);
if (watchedFiles.length === 1) {
startWatchTimer();
}
return file;
}
function removeFile(file: WatchedFile) {
unorderedRemoveItem(watchedFiles, file);
}
}
// REVIEW: for now this implementation uses polling.
// The advantage of polling is that it works reliably
// on all os and with network mounted files.
// For 90 referenced files, the average time to detect
// changes is 2*msInterval (by default 5 seconds).
// The overhead of this is .04 percent (1/2500) with
// average pause of < 1 millisecond (and max
// pause less than 1.5 milliseconds); question is
// do we anticipate reference sets in the 100s and
// do we care about waiting 10-20 seconds to detect
// changes for large reference sets? If so, do we want
// to increase the chunk size or decrease the interval
// time dynamically to match the large reference set?
const pollingWatchedFileSet = createPollingWatchedFileSet();
const pending: Buffer[] = [];
let canWrite = true;
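// Added commentary: writeMessage below serializes writes to stdout. While a
// write is in flight, canWrite is false and later buffers queue up in
// `pending`; the completion callback then drains the queue one buffer at a
// time, which preserves message ordering without blocking the event loop.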
function writeMessage(buf: Buffer) {
if (!canWrite) {
pending.push(buf);
}
else {
canWrite = false;
process.stdout.write(buf, setCanWriteFlagAndWriteMessageIfNecessary);
}
}
function setCanWriteFlagAndWriteMessageIfNecessary() {
canWrite = true;
if (pending.length) {
writeMessage(pending.shift());
}
}
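// Illustrative results for extractWatchDirectoryCacheKey below (added
// commentary, not part of the original source):
//   extractWatchDirectoryCacheKey("//server/share/dir", key) -> "//server"
//   extractWatchDirectoryCacheKey("c:/work/project", key)    -> "c"
//   extractWatchDirectoryCacheKey("src/file.ts", key)        -> key (relative path, current drive)
//   extractWatchDirectoryCacheKey("/usr/lib", key)           -> key (rooted on current drive)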
function extractWatchDirectoryCacheKey(path: string, currentDriveKey: string) {
path = normalizeSlashes(path);
if (isUNCPath(path)) {
// UNC path: extract server name
// //server/location
// ^ <- from 0 to this position
const firstSlash = path.indexOf(directorySeparator, 2);
return firstSlash !== -1 ? path.substring(0, firstSlash).toLowerCase() : path;
}
const rootLength = getRootLength(path);
if (rootLength === 0) {
// relative path - assume file is on the current drive
return currentDriveKey;
}
if (path.charCodeAt(1) === CharacterCodes.colon && path.charCodeAt(2) === CharacterCodes.slash) {
// rooted path that starts with c:/... - extract drive letter
return path.charAt(0).toLowerCase();
}
if (path.charCodeAt(0) === CharacterCodes.slash && path.charCodeAt(1) !== CharacterCodes.slash) {
// rooted path that starts with slash - /somename - use key for current drive
return currentDriveKey;
}
// do not cache any other cases
return undefined;
}
function isUNCPath(s: string): boolean {
return s.length > 2 && s.charCodeAt(0) === CharacterCodes.slash && s.charCodeAt(1) === CharacterCodes.slash;
}
const logger = createLogger();
const sys = <ServerHost>ts.sys;
// use watchGuard process on Windows when node version is 4 or later
const useWatchGuard = process.platform === "win32" && getNodeMajorVersion() >= 4;
if (useWatchGuard) {
const currentDrive = extractWatchDirectoryCacheKey(sys.resolvePath(sys.getCurrentDirectory()), /*currentDriveKey*/ undefined);
const statusCache = createMap<boolean>();
const originalWatchDirectory = sys.watchDirectory;
sys.watchDirectory = function (path: string, callback: DirectoryWatcherCallback, recursive?: boolean): FileWatcher {
const cacheKey = extractWatchDirectoryCacheKey(path, currentDrive);
let status = cacheKey && statusCache.get(cacheKey);
if (status === undefined) {
if (logger.hasLevel(LogLevel.verbose)) {
logger.info(`${cacheKey} for path ${path} not found in cache...`);
}
try {
const args = [combinePaths(__dirname, "watchGuard.js"), path];
if (logger.hasLevel(LogLevel.verbose)) {
logger.info(`Starting ${process.execPath} with args ${JSON.stringify(args)}`);
}
childProcess.execFileSync(process.execPath, args, { stdio: "ignore", env: { "ELECTRON_RUN_AS_NODE": "1" } });
status = true;
if (logger.hasLevel(LogLevel.verbose)) {
logger.info(`WatchGuard for path ${path} returned: OK`);
}
}
catch (e) {
status = false;
if (logger.hasLevel(LogLevel.verbose)) {
logger.info(`WatchGuard for path ${path} returned: ${e.message}`);
}
}
if (cacheKey) {
statusCache.set(cacheKey, status);
}
}
else if (logger.hasLevel(LogLevel.verbose)) {
logger.info(`watchDirectory for ${path} uses cached drive information.`);
}
if (status) {
// this drive is safe to use - call real 'watchDirectory'
return originalWatchDirectory.call(sys, path, callback, recursive);
}
else {
// this drive is unsafe - return no-op watcher
return { close() { } };
}
};
}
// Override sys.write because fs.writeSync is not reliable on Node 4
sys.write = (s: string) => writeMessage(new Buffer(s, "utf8"));
sys.watchFile = (fileName, callback) => {
const watchedFile = pollingWatchedFileSet.addFile(fileName, callback);
return {
close: () => pollingWatchedFileSet.removeFile(watchedFile)
};
};
sys.setTimeout = setTimeout;
sys.clearTimeout = clearTimeout;
sys.setImmediate = setImmediate;
sys.clearImmediate = clearImmediate;
if (typeof global !== "undefined" && global.gc) {
sys.gc = () => global.gc();
}
sys.require = (initialDir: string, moduleName: string): RequireResult => {
try {
return { module: require(resolveJavaScriptModule(moduleName, initialDir, sys)), error: undefined };
}
catch (error) {
return { module: undefined, error };
}
};
let cancellationToken: ServerCancellationToken;
try {
const factory = require("./cancellationToken");
cancellationToken = factory(sys.args);
}
catch (e) {
cancellationToken = nullCancellationToken;
}
let eventPort: number;
{
const str = findArgument("--eventPort");
const v = str && parseInt(str);
if (!isNaN(v)) {
eventPort = v;
}
}
const localeStr = findArgument("--locale");
if (localeStr) {
validateLocaleAndSetLanguage(localeStr, sys);
}
setStackTraceLimit();
const typingSafeListLocation = findArgument(Arguments.TypingSafeListLocation);
const typesMapLocation = findArgument(Arguments.TypesMapLocation) || combinePaths(sys.getExecutingFilePath(), "../typesMap.json");
const npmLocation = findArgument(Arguments.NpmLocation);
function parseStringArray(argName: string): ReadonlyArray<string> {
const arg = findArgument(argName);
if (arg === undefined) {
return emptyArray;
}
return arg.split(",").filter(name => name !== "");
}
const globalPlugins = parseStringArray("--globalPlugins");
const pluginProbeLocations = parseStringArray("--pluginProbeLocations");
const allowLocalPluginLoads = hasArgument("--allowLocalPluginLoads");
const useSingleInferredProject = hasArgument("--useSingleInferredProject");
const useInferredProjectPerProjectRoot = hasArgument("--useInferredProjectPerProjectRoot");
const disableAutomaticTypingAcquisition = hasArgument("--disableAutomaticTypingAcquisition");
const telemetryEnabled = hasArgument(Arguments.EnableTelemetry);
const options: IOSessionOptions = {
host: sys,
cancellationToken,
installerEventPort: eventPort,
canUseEvents: eventPort === undefined,
useSingleInferredProject,
useInferredProjectPerProjectRoot,
disableAutomaticTypingAcquisition,
globalTypingsCacheLocation: getGlobalTypingsCacheLocation(),
typingSafeListLocation,
typesMapLocation,
npmLocation,
telemetryEnabled,
logger,
globalPlugins,
pluginProbeLocations,
allowLocalPluginLoads
};
const ioSession = new IOSession(options);
process.on("uncaughtException", function (err: Error) {
ioSession.logError(err, "unknown");
});
// See https://github.com/Microsoft/TypeScript/issues/11348
(process as any).noAsar = true;
// Start listening
ioSession.listen();
}
| parseLoggingEnvironmentString |
mod.rs | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::IMON0 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct IACCESS_COUNTR {
bits: u32,
}
impl IACCESS_COUNTR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _IACCESS_COUNTW<'a> {
w: &'a mut W,
}
impl<'a> _IACCESS_COUNTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u32) -> &'a mut W { | const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:31 - Total accesses to Instruction cache"]
#[inline]
pub fn iaccess_count(&self) -> IACCESS_COUNTR {
let bits = {
const MASK: u32 = 4294967295;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u32
};
IACCESS_COUNTR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:31 - Total accesses to Instruction cache"]
#[inline]
pub fn iaccess_count(&mut self) -> _IACCESS_COUNTW {
_IACCESS_COUNTW { w: self }
}
} | const MASK: u32 = 4294967295; |
converter.pipe.ts | import { Pipe, PipeTransform } from '@angular/core';
@Pipe({
name: 'convert'
})
export class ConvertPipe implements PipeTransform {
transform(value: number, ...args: any[]): any {
const [from, to] = args;
return value && this.metersPerSecondTo(value, from, to);
}
private metersPerSecondTo(value: number, from: string, to: string): string {
if (from === 'mps') {
if (to === 'kmh') {
return Math.round(value * 3.6) + ' km/h';
}
}
return Math.round(value) + ' km/h';
} | } | |
test_regression.py | from __future__ import division, absolute_import, print_function
import copy
import pickle
import sys
import platform
import gc
import warnings
import tempfile
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, assert_warns, dec
)
from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
# Ticket #3
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
def test_mem_empty(self,level=rlevel):
# Ticket #7
np.empty((1,), dtype=[('x', np.int64)])
def test_pickle_transposed(self,level=rlevel):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
f = BytesIO()
pickle.dump(a, f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a, b)
def test_typeNA(self,level=rlevel):
# Ticket #31
assert_equal(np.typeNA[np.int64], 'Int64')
assert_equal(np.typeNA[np.uint64], 'UInt64')
def test_dtype_names(self,level=rlevel):
# Ticket #35
# Should succeed
np.dtype([(('name', 'label'), np.int32, 3)])
def test_reduce(self,level=rlevel):
# Ticket #40
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
def test_zeros_order(self,level=rlevel):
# Ticket #43
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_asarray_with_order(self,level=rlevel):
# Check that nothing is done when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
def test_ravel_with_order(self,level=rlevel):
# Check that ravel works when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
def test_sort_bigendian(self,level=rlevel):
# Ticket #47
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self,level=rlevel):
# Ticket #49
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
# Ticket #50
f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
def test_noncontiguous_fill(self,level=rlevel):
# Ticket #58.
a = np.zeros((5, 3))
b = a[:, :2,]
def rs():
b.shape = (10,)
self.assertRaises(AttributeError, rs)
def test_bool(self,level=rlevel):
# Ticket #60
np.bool_(1) # Should succeed
def test_indexing1(self,level=rlevel):
# Ticket #64
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
# Ticket #65
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
# Ticket #67
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
self.assertTrue(a[1] == 'auto')
self.assertTrue(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.assertTrue(b != 'auto')
self.assertTrue(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
# Ticket #79
ulen = 1
ucs_value = sixu('\U0010FFFF')
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua.newbyteorder() # Should succeed.
def test_object_array_fill(self,level=rlevel):
# Ticket #86
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self,level=rlevel):
# Ticket #93
self.assertRaises(TypeError, np.dtype,
{'names':['a'],'formats':['foo']}, align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
(sys.platform == "win32" and
platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
def test_intp(self,level=rlevel):
# Ticket #99
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width, 16)
self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
self.assertRaises(ValueError, np.intp, '0x1', 32)
assert_equal(255, np.intp('0xFF', 16))
assert_equal(1024, np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
# Ticket #105
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
xa = np.where((a > 2) & (a < 6))
xb = np.where((b > 2) & (b < 6))
ya = ((a > 2) & (a < 6))
yb = ((b > 2) & (b < 6))
assert_array_almost_equal(xa, ya.nonzero())
assert_array_almost_equal(xb, yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_endian_where(self,level=rlevel):
# GitHub issue #369
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
max_net = net.max()
test = np.where(net <= 0., max_net, net)
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
def test_endian_recarray(self,level=rlevel):
# Ticket #2185
dt = np.dtype([
('head', '>u4'),
('data', '>u4', 2),
])
buf = np.recarray(1, dtype=dt)
buf[0]['head'] = 1
buf[0]['data'][:] = [1, 1]
h = buf[0]['head']
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
def test_mem_dot(self,level=rlevel):
# Ticket #106
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
# Dummy array to detect bad memory access:
_z = np.ones(10)
_dummy = np.empty((0, 10))
z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
np.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
# Do the same for the built-in dot:
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
def test_arange_endian(self,level=rlevel):
# Ticket #111
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
assert_array_equal(ref, x)
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
def test_argmax(self,level=rlevel):
# Ticket #119
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
a.argmax(i) # Should succeed
def test_mem_divmod(self,level=rlevel):
# Ticket #126
for i in range(10):
divmod(np.array([i])[0], 10)
def test_hstack_invalid_dims(self,level=rlevel):
# Ticket #128
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
self.assertRaises(ValueError, np.hstack, (x, y))
def test_squeeze_type(self,level=rlevel):
# Ticket #133
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self,level=rlevel):
# Ticket #143
assert_equal(0, np.add.identity)
def test_numpy_float_python_long_addition(self):
# Check that numpy float and python longs can be added correctly.
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
def test_binary_repr_0(self,level=rlevel):
# Ticket #151
assert_equal('0', np.binary_repr(0))
def test_rec_iterate(self,level=rlevel):
# Ticket #160
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
(2, 2.2, '2.0')], dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self,level=rlevel):
# Ticket #190
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel):
# Fix in r2836
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))
def test_flat_assignment(self,level=rlevel):
# Correct behaviour of ticket #194
x = np.empty((3, 1))
x.flat = np.arange(3)
assert_array_almost_equal(x, [[0], [1], [2]])
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
def test_broadcast_flat_assignment(self,level=rlevel):
# Ticket #194
x = np.empty((3, 1))
def bfa():
x[:] = np.arange(3)
def bfb():
x[:] = np.arange(3, dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
# and equivalent unsafe casted array assignment
a = np.arange(10)
b = np.ones(10, dtype=bool)
r = np.arange(10)
def assign(a, b, c):
a[b] = c
assert_raises(ValueError, assign, a, b, np.nan)
a[b] = np.array(np.nan) # but not this.
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
def test_unpickle_dtype_with_object(self,level=rlevel):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
f = BytesIO()
pickle.dump(dt, f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self,level=rlevel):
# Ticket #196
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
self.assertRaises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
def test_recarray_single_element(self,level=rlevel):
# Ticket #202
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
assert_array_equal(a, b)
assert_equal(a, r[0][0])
def test_zero_sized_array_indexing(self,level=rlevel):
# Ticket #205
tmp = np.array([])
def index_tmp():
tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self,level=rlevel):
# Ticket #222
x = np.chararray((1,), 5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self,level=rlevel):
# Ticket #239
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
assert_equal(np.array([], dtype=object).shape, (0,))
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
def test_mem_around(self,level=rlevel):
# Ticket #243
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
def test_character_array_strip(self,level=rlevel):
# Ticket #246
x = np.char.array(("x", "x ", "x "))
for c in x:
assert_equal(c, "x")
def test_lexsort(self,level=rlevel):
# Lexsort memory error
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
def test_lexsort_invalid_sequence(self):
# Issue gh-4123
class BuggySequence(object):
def __len__(self):
return 4
def __getitem__(self, key):
raise KeyError
assert_raises(KeyError, np.lexsort, BuggySequence())
def test_pickle_py2_bytes_encoding(self):
# Check that arrays and scalars pickled on Py2 are
# unpickleable on Py3 using encoding='bytes'
test_data = [
# (original, py2_pickle)
(np.unicode_('\u6f2c'),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n.")),
(np.array([9e123], dtype=np.float64),
asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb.")),
(np.array([(9e123,)], dtype=[('name', float)]),
asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb.")),
]
if sys.version_info[:2] >= (3, 4):
# encoding='bytes' was added in Py3.4
for original, data in test_data:
result = pickle.loads(data, encoding='bytes')
assert_equal(result, original)
if isinstance(result, np.ndarray) and result.dtype.names:
for name in result.dtype.names:
assert_(isinstance(name, str))
def test_pickle_dtype(self,level=rlevel):
# Ticket #251
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
# Ticket #265
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
def test_object_array_from_list(self, level=rlevel):
# Ticket #270
np.array([1, 'A', None]) # Should succeed
def test_multiple_assign(self, level=rlevel):
# Ticket #273
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self, level=rlevel):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8, 7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
res1 = arr
if res1.dtype.kind in 'uib':
assert_((res1 == res2).all(), func)
else:
assert_(abs(res1-res2).max() < 1e-8, func)
for func in funcs2:
arr1 = np.random.rand(8, 7)
arr2 = np.random.rand(8, 7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self, level=rlevel):
# Ticket #298
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
# Ticket #302
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
def test_recarray_copy(self, level=rlevel):
# Ticket #312
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self, level=rlevel):
# Ticket #322
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
def test_ndmin_float64(self, level=rlevel):
# Ticket #324
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
def test_ndmin_order(self, level=rlevel):
# Issue #465 and related checks
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
def test_mem_axis_minimization(self, level=rlevel):
# Ticket #327
data = np.arange(5)
data = np.add.outer(data, data)
def test_mem_float_imag(self, level=rlevel):
# Ticket #330
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
# Ticket #334
assert_(np.dtype('i4') == np.dtype(('i4', ())))
def test_dtype_posttuple(self, level=rlevel):
# Ticket #335
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
# Ticket #341
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
# Ticket #342
self.assertRaises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
def test_dtype_repr(self, level=rlevel):
# Ticket #344
dt1 = np.dtype(('uint32', 2))
dt2 = np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self, level=rlevel):
# Make sure reshape order works.
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
def test_reshape_zero_strides(self, level=rlevel):
# Issue #380, test reshaping of zero strided arrays
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
def test_reshape_zero_size(self, level=rlevel):
# GitHub Issue #2700, setting shape failed for 0-sized arrays
a = np.ones((0, 2))
a.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
strides_c = (16, 8, 8, 8)
strides_f = (8, 24, 48, 48)
assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
def test_repeat_discont(self, level=rlevel):
# Ticket #352
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
def test_array_index(self, level=rlevel):
# Make sure optimization is not called in this case.
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self, level=rlevel):
# Ticket #372
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)]),
np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
def test_random_shuffle(self, level=rlevel):
# Ticket #374
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
def test_refcount_vdot(self, level=rlevel):
# Changeset #3443
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
# Ticket #413
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
# Convolve should raise an error for empty input array.
self.assertRaises(ValueError, np.convolve, [], [1])
self.assertRaises(ValueError, np.convolve, [1], [])
def test_multidim_byteswap(self, level=rlevel):
# Ticket #449
r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
def test_string_NULL(self, level=rlevel):
# Changeset 3557
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
# Ticket #483
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))
def test_take_output(self, level=rlevel):
# Ensure that 'take' honours output parameter.
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
b = np.zeros_like(a)
np.take(x, [0, 2], axis=1, out=b)
assert_array_equal(a, b)
def test_take_object_fail(self):
# Issue gh-3001
d = 123.
a = np.array([d, 1], dtype=object)
ref_d = sys.getrefcount(d)
try:
a.take([0, 100])
except IndexError:
pass
assert_(ref_d == sys.getrefcount(d))
def test_array_str_64bit(self, level=rlevel):
# Ticket #501
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
np.array_str(s) # Should succeed
def test_frompyfunc_endian(self, level=rlevel):
# Ticket #503
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s))
def test_arr_transpose(self, level=rlevel):
# Ticket #516
x = np.random.rand(*(2,)*16)
x.transpose(list(range(16))) # Should succeed
def test_string_mergesort(self, level=rlevel):
# Ticket #540
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
# Ticket #546
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self, level=rlevel):
# Ticket #555
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
# Ticket #562
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
def test_dot_negative_stride(self, level=rlevel):
# Ticket #588
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
def test_object_casting(self, level=rlevel):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484, 286])
y = np.zeros([484, 286])
x |= y
self.assertRaises(TypeError, rs)
def test_unicode_scalar(self, level=rlevel):
# Ticket #600
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = pickle.loads(pickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
# Ticket #616
for T in ('>f4', '<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0, dtype=dt).dtype, dt)
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v):
x[(s > 0)] = v
# After removing deprecation, the following are ValueErrors.
# This might seem odd as compared to the value error below. This
# is due to the fact that the new code always uses "nonzero" logic
# and the boolean special case is not taken.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))
# Old special case (different code path):
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
def test_mem_scalar_indexing(self, level=rlevel):
# Ticket #603
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
assert_equal(np.binary_repr(0, width=3), '000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
def test_string_argsort_with_zeros(self, level=rlevel):
# Check argsort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
def test_string_sort_with_zeros(self, level=rlevel):
# Check sort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
def test_flat_byteorder(self, level=rlevel):
# Ticket #657
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel):
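        # A negative Python int should wrap modulo 2**64 when cast to uint64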
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
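        # np.abs should clear the sign bit of negative zero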
x = np.array([0, -0.0, 0])
assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
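        # Scalars read via .flat should keep the same dtype as plain indexing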
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_copy_detection_corner_case2(self, level=rlevel):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
# Ticket #633
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1-d broadcast slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1-d -> 2-d broadcast slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:,:] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:, 0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2-d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:, 0] = a
arr[:, 1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:, 0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
tmp = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
tmp = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
tmp = arr1.take([1, 2, 3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0], [1], [0], [1], [1]], int)
tmp = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
del tmp # Avoid pyflakes unused variable warning
def test_mem_custom_float_to_array(self, level=rlevel):
# Ticket 702
class MyFloat(object):
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp.astype(float) # Should succeed
def test_object_array_refcount_self_assign(self, level=rlevel):
# Ticket #711
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1, 2, 3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
# Ticket #713
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
# Ticket #714
np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
# Ticket #640, floats from string
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_nonnative_endian_fill(self, level=rlevel):
# Non-native endian arrays were incorrectly filled with scalars
# before r5034.
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
# Test for ticket #551, changeset r5140
x = np.zeros((30, 40))
y = pickle.loads(pickle.dumps(x))
# y is now typically not aligned on a 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
# Ticket #788, changeset r5155
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
f = open(filename, 'rb')
xp = pickle.load(f, encoding='latin1')
f.close()
else:
f = open(filename)
xp = pickle.load(f)
f.close()
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self, level=rlevel):
# Ticket #789, changeset 5217.
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype=np.single)
try:
a.compress([True, False], axis=1, out=b)
raise AssertionError("compress with an out which cannot be "
"safely casted should not return "
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
# Ticket #791
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
assert_(dat.info == 'jubba')
dat.resize((4, 2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2, 3, 4], [6, 3, 4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32, 0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2, 7).info == 'jubba')
assert_(dat.compress([0, 1, 1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0], 'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32, 0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2, 4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0, 1).info == 'jubba')
assert_(dat.take([2, 3, 5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
# These methods do not preserve subclasses
assert_(type(dat.nonzero()[0]) is np.ndarray)
assert_(type(dat.nonzero()[1]) is np.ndarray)
def test_recarray_tolist(self, level=rlevel):
# Ticket #793, changeset r5215
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
        assert_(a[0].tolist() == b[0])
        assert_(a[1].tolist() == b[1])
def test_nonscalar_item_method(self):
# Make sure that .item() fails graciously when it should
a = np.arange(5)
assert_raises(ValueError, a.item)
def test_char_array_creation(self, level=rlevel):
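        # dtype 'c' should split a string into a character array matching the list-of-bytes form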
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1', '2', '3']))
assert_equal(a, b)
def test_unaligned_unicode_access(self, level=rlevel):
# Ticket #825
for i in range(1, 9):
msg = 'unicode offset: %d chars' % i
t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
x = np.array([(asbytes('a'), sixu('b'))], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
# Ticket 794.
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self, level=rlevel):
# Ticket #674
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self, level=rlevel):
# Ticket #816
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4, 5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
# Ticket #840 - order should be an invalid keyword.
x = np.array([[0, 1], [2, 3]])
self.assertRaises(TypeError, x.resize, (2, 2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
# Ticket #955
with np.errstate(all="ignore"):
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert_(n_before >= n_after, (n_before, n_after))
def test_void_scalar_with_titles(self, level=rlevel):
# No ticket
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_void_scalar_constructor(self):
#Issue #1550
#Create test string data, construct void scalar from data and assert
#that void scalar contains original data.
test_string = np.array("test")
test_string_void_scalar = np.core.multiarray.scalar(
np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
#Create record scalar, construct from data and assert that
#reconstructed scalar is correct.
test_record = np.ones((), "i,i")
test_record_void_scalar = np.core.multiarray.scalar(
test_record.dtype, test_record.tobytes())
assert_(test_record_void_scalar == test_record)
#Test pickle and unpickle of void and record scalars
assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
# Regression test for #1061.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
# Regression test for #1062.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
            a = np.arange(sz)
            self.assertTrue(a.size == sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
# Ticket #1058
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
# Ticket #1078: segfaults when creating an array with a sequence of
# 0d arrays.
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
# Ticket #1081: weird array with strange input...
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
# Ticket #1080.
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
# Ticket #1106.
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
# Ticket #1217, don't segfault.
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
# Ticket #1235.
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
# Ticket #1240.
a = np.array([[sixu('abc'), sixu('\u03a3')],
[sixu('asdf'), sixu('erw')]],
dtype='U')
self.assertRaises(UnicodeEncodeError, np.array, a, 'S4')
def test_mixed_string_unicode_array_creation(self):
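        # Mixed bytes/unicode input should promote to a unicode dtype wide enough
        # for the longest element (4 bytes per character)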
a = np.array(['1234', sixu('123')])
assert_(a.itemsize == 16)
a = np.array([sixu('123'), '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', sixu('123'), '12345'])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('12345')])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('1234')])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
# Ticket #1198 and #1267
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
# Ticket #1267
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
# Ticket #1267
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
# Ticket #1259 and gh-441
for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
z = np.array([2.2-1.1j], dtype)
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder()))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype))
# double check real and imaginary parts:
assert_equal(x.real, y.real.byteswap())
assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
# Ticket #1299
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
def test_structured_arrays_with_objects2(self):
# Ticket #1299 second test
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
# Ticket #1254
dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
self.assertRaises(ValueError, np.dtype, dtspec)
def test_signed_integer_division_overflow(self):
# Ticket #1317.
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
with np.errstate(divide="ignore"):
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], dtypes[0])
failures = []
# ignore complex warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls, i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1, 2, 3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=np.float)
a.shape = (4, 4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
# On Python 3, tofile/fromfile used to get (#1610) the Python
# file handle out of sync
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tobytes())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_complex_boolean_cast(self):
# Ticket #2218
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
assert_(np.any(x))
assert_(np.all(x[1:]))
def test_uint_int_conversion(self):
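        # A uint64 at the top of its range should convert back to the exact Python int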
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
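        # nonzero() should respect the array's byte order after a byteswap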
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([], ['?', '?']) == '?')
def test_empty_mul(self):
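        # In-place multiply on an empty slice should leave the array unchanged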
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
# The second use of itemsize was throwing an exception because in
# ctors.c, discover_itemsize was calling PyObject_Length without
# checking the return code. This failed to get the length of the
# number 2, and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
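        # Accumulating in float64 via the dtype argument should match upcasting first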
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
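        # Adding float32 and float64 with a float32 out array should downcast the result into out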
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_squeeze_contiguous(self):
# Similar to GitHub issue #387
a = np.zeros((1, 2)).squeeze()
b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze()
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
def test_reduce_contiguous(self):
# GitHub issue #387
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
b = np.add.reduce(np.zeros((2, 1, 2)), 1)
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.c_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
# Test the same for a circular reference.
b = np.array(a, dtype=object)
a[()] = b
assert_raises(TypeError, int, a)
# Numpy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
a[()] = 0
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
# causing segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
assert_equal(sys.getrefcount(a[()]), 2)
a[()].__class__ # will segfault if object was deleted
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1, 0]), [])
def test_objectarray_setfield(self):
# Setfield should not overwrite Object fields with non-Object data
x = np.array([1, 2, 3], dtype=object)
assert_raises(TypeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
# Ticket #1756
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d" % i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
r = np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_alignment_update(self):
# Check that alignment flag is updated on stride setting
a = np.arange(10)
assert_(a.flags.aligned)
a.strides = 3
assert_(not a.flags.aligned)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1, 2], [3, 4]]).T
np.array(x.flat)
assert_equal(x, [[1, 3], [2, 4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
if sys.version_info[0] >= 3:
data = np.array([1], dtype='b')
data = pickle.loads(pickle.dumps(data))
data[0] = 0xdd
bytestring = "\x01 ".encode('ascii')
assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
data = asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
"p13\ntp14\nb.")
if sys.version_info[0] >= 3:
# This should work:
result = pickle.loads(data, encoding='latin1')
assert_array_equal(result, np.array([129], dtype='b'))
# Should not segfault:
assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
def test_pickle_py2_scalar_latin1_hack(self):
# Check that scalar unpickling hack in Py3 that supports
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(...)
datas = [
# (original, python2_pickle, koi8r_validity)
(np.unicode_('\u6bd2'),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
'invalid'),
(np.float64(9e123),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
'invalid'),
(np.bytes_(asbytes('\x9c')), # different 8-bit code point in KOI8-R vs latin1
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
"tp8\nRp9\n."),
'different'),
]
if sys.version_info[0] >= 3:
for original, data, koi8r_validity in datas:
result = pickle.loads(data, encoding='latin1')
assert_equal(result, original)
# Decoding under non-latin1 encoding (e.g.) KOI8-R can
# produce bad results, but should not segfault.
if koi8r_validity == 'different':
# Unicode code points happen to lie within latin1,
# but are different in koi8-r, resulting to silent
# bogus results
result = pickle.loads(data, encoding='koi8-r')
assert_(result != original)
elif koi8r_validity == 'invalid':
# Unicode code points outside latin1, so results
# to an encoding exception
assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
else:
raise ValueError(koi8r_validity)
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0, 1)
a_obj[1] = (3, 2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2, 2), dtype=object)
a[...] = [[1, 2]]
assert_equal(a, [[1, 2], [1, 2]])
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr in [asunicode, asbytes]:
b = np.array([val, tostr('xx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val])
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val])
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for Numpy's four byte unicode.
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
a = np.array([sixu('abcd')])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
# Ticket #2063 must always choose stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode)[0]
del a
def test_refcount_error_in_clip(self):
# Ticket #1588
a = np.zeros((2,), dtype='>i2').clip(min=0)
x = a + a
# This used to segfault:
y = str(x)
# Check the final string:
assert_(y == "[0 0]")
def test_searchsorted_wrong_dtype(self):
# Ticket #2189, it used to segfault, so we check that it raises the
# proper exception.
a = np.array([('a', 1)], dtype='S1, int')
assert_raises(TypeError, np.searchsorted, a, 1.2)
# Ticket #2066, similar problem:
dtype = np.format_parser(['i4', 'i4'], [], [])
a = np.recarray((2, ), dtype)
assert_raises(TypeError, np.searchsorted, a, 1)
def test_complex64_alignment(self):
# Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
dtt = np.complex64
arr = np.arange(10, dtype=dtt)
# 2D array
arr2 = np.reshape(arr, (2, 5))
# Fortran write followed by (C or F) read caused bus error
data_str = arr2.tobytes('F')
data_back = np.ndarray(arr2.shape,
arr2.dtype,
buffer=data_str,
order='F')
assert_array_equal(arr2, data_back)
def test_structured_count_nonzero(self):
arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
count = np.count_nonzero(arr)
assert_equal(count, 0)
def test_copymodule_preserves_f_contiguity(self):
a = np.empty((2, 2), order='F')
b = copy.copy(a)
c = copy.deepcopy(a)
assert_(b.flags.fortran)
assert_(b.flags.f_contiguous)
assert_(c.flags.fortran)
assert_(c.flags.f_contiguous)
def test_fortran_order_buffer(self):
import numpy as np
a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')],
[sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]])
assert_array_equal(arr, arr2)
def test_assign_from_sequence_error(self):
# Ticket #4024.
arr = np.array([1, 2, 3])
assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
arr.__setitem__(slice(None), [9])
assert_equal(arr, [9, 9, 9])
def test_format_on_flex_array_element(self):
# Ticket #4369.
dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
arr = np.array([('2000-01-01', 1)], dt)
formatted = '{0}'.format(arr[0])
assert_equal(formatted, str(arr[0]))
def test_deepcopy_on_0d_array(self):
# Ticket #3311.
arr = np.array(3)
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
self.assertTrue(arr is not arr_cp)
self.assertTrue(isinstance(arr_cp, type(arr)))
def test_bool_subscript_crash(self):
# gh-4494
c = np.rec.array([(1, 2, 3), (4, 5, 6)])
masked = c[np.array([True, False])]
base = masked.base
del masked, c
base.dtype
def test_richcompare_crash(self):
# gh-4613
        import operator as op
        # dummy class where __array__ throws exception
        class Foo(object):
            __array_priority__ = 1002
            def __array__(self, *args, **kwargs):
                raise Exception()
        rhs = Foo()
        lhs = np.array(1)
        for f in [op.lt, op.le, op.gt, op.ge]:
            if sys.version_info[0] >= 3:
                assert_raises(TypeError, f, lhs, rhs)
            else:
                f(lhs, rhs)
        assert_(not op.eq(lhs, rhs))
        assert_(op.ne(lhs, rhs))
def test_richcompare_scalar_and_subclass(self):
# gh-4709
class Foo(np.ndarray):
def __eq__(self, other):
return "OK"
x = np.array([1,2,3]).view(Foo)
assert_equal(10 == x, "OK")
assert_equal(np.int32(10) == x, "OK")
assert_equal(np.array([10]) == x, "OK")
def test_pickle_empty_string(self):
# gh-3926
import pickle
test_string = np.string_('')
assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)
def test_frompyfunc_many_args(self):
# gh-5672
def passer(*args):
pass
assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
def test_repeat_broadcasting(self):
# gh-5743
a = np.arange(60).reshape(3, 4, 5)
for axis in chain(range(-a.ndim, a.ndim), [None]):
assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
def test_frompyfunc_nout_0(self):
# gh-2014
def f(x):
x[0], x[-1] = x[-1], x[0]
uf = np.frompyfunc(f, 1, 0)
a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])
assert_equal(uf(a), ())
assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])
def test_leak_in_structured_dtype_comparison(self):
# gh-6250
recordtype = np.dtype([('a', np.float64),
('b', np.int32),
('d', (np.str, 5))])
# Simple case
a = np.zeros(2, dtype=recordtype)
for i in range(100):
a == a
assert_(sys.getrefcount(a) < 10)
# The case in the bug report.
before = sys.getrefcount(a)
u, v = a[0], a[1]
u == v
del u, v
gc.collect()
after = sys.getrefcount(a)
assert_equal(before, after)
def test_empty_percentile(self):
# gh-6530 / gh-6553
assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
def test_void_compare_segfault(self):
# gh-6922. The following should not segfault
a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
a.sort()
if __name__ == "__main__":
run_module_suite()